af4ff571dac4eba29c3594284aa65bda92a88276ae7950eaab0be7141dcbeee0d3730c6e4e50dbcfea7c01a069623f4b113a672d4933212d102e3ba406e57001d9f2179773faa476096dc72535c5b3eab99ad7f615b1b29ad1a706c30835a3c874e901094640d4a7fe943d6f68f05cd3af5dbf113de2e6581f28542789e7de78f64bac580502d266337494d0ea94a7299185bcf1391342de89c2d61c33e7fe11c94d2a64c6190a906b0d05416c7e1b4600b0c870625d1e3fe590861ad1ad3ff82378f02dd556a39f925a8a74514d7d439a183949cce31d3dfa741ec91bb35f547427fbbed28d96e3f0690f77716a0342d9233adac93a2b0ae919ae9eac26b2f5cbcc8ec889fdc85c96c880dd43d93e331be01304425810e7dca66618d41a09cb5cccc8886ba3c7f8c89fe2bc89d1148ea0923355d1579da3580ba31dfe83a979a818fb45a524235f03a118a38d6fced85baa9180d6a8a1042d4b9e25d2bd59eccd34bbe7c40b16551d9a8544e9d4124a2bb98926b32700d1bce84283a8b45931b01a13d0ff0bb26eff5f8977cd6ee41b93d1ed03320a89dff9e5946aae01f0cb46c0d0e423a04f154705b3dc865269562ac15723e24de51f0fd5cde24ce91afdc47180fa9e904838a3bb2f3667c2618b0423175914175e0fa05b6cc4812b8b83850517a9fdda67e5f0c832b5844b42a111619b8d338e272bc98d9ca32400261469e7c34504d84e71344752b1698680ffcdfd170784e9d43f5f1117894f3b7e0523ab398d8f65672a95a984d5dde6d416cd6209ff792aafa919ecc45c1a26f5b1144cbe2217b7b4a1c44dd2f3e279d0630ef1485c689ec74f6ccfad58d02c607d27511b6b45c14a3d3ac174e9cc2d97ec9550aa6a6115f5c16005afa210d7b5e5bd3727d7778ff7ea4dbe88ace6e8006b6c17475c042138c29d3b038f5c3e6fbe0daa6961fed681934dc00ac92f62ecaa651db6e695d53b20bc4dc8bc11291add3f2ccb9e64b6f5e3b34c4535d3a9bdc95a8482bddd84e96760f8e3b81d453a58d7b07b1d5be67d9283342b565829551c0842f750bc3fb9f0a7c6e5659d825c0bef3bc661a63d1f9239cecf08223f4726c5325726a322183401bb61245fa97ea377be2bed5d28896799e98568d7cd7f181d5df50670aca2619fb473a0883c3a493ec820dc8d100ea012286336c71e68cfd54e27885c42c19dbb73ec7e19956f9c2f9cbad25a31fa5d18edacf0310e5f7b5e3b565dc455a927e4a0d94903d5f9f4b85dd8150d290ebd6043c2397324655577f08df92e143250f6f1dc872f8f61a883675dec8128813c63b6de91ff2b16cb689e2b3f099fdbaedfece5dc87894fbade21057bf1ac2b59bb4291b29023734697c8527fc56487b1ccf6d0eeea2cda6de36d5582c98ff3e9c6ea6fb1270260ce29c5b4ad28e5c67677179eda5749f49cddd8261af00c5400f65ba36bb806d886cd475fa8af8c5c2e6806b11bf23c3d0c97462df6b7eb5abbf031f1c9e6c13482c78f173f05bc3729c305ad06946250051e3406f2e0d7929329d269f7ebcbde4cabd956536bb43bd72eafcc8737ed33d2b699942b60abb4f6e50e25550499943c9947a43fbc2e705a6e2f76fc2bb013832797c8e6c22c803fd06976b62fd8ebc656c25c5240bcd327a925a0dc5f69d679a23613a4b0e2b5f79dc8e5be01fff3709accc3c58a4877ed0b100eaebd5fa83d1d830a700054dc157053d5b10a1e76e40a898b6690e66d8c5e6ae5a28a4609317ebea0c90ebc4fd57055a3b7d6ea9746852aca59a302529124ca79ccef96c06e789226b284a978143782e1ce3c43f35ab993bebcf02b2c114ebf17b494d2e418b398e5c35fa46f107bad3c84c4b00698911a040a7aa053bbb4d3ea0f0e21190c3342ad7226824eba5dee5b0630e97f58471685c3319ac83a5a0e38067a47d6cd87713bfd918a4674a6fdb8eec77f14f8040ebf374d78dfb5b1473284fe833d0cfac434026745960943f3984dc9abc831e01c9d3993658d1ff34309fff36a21298f779fea624e5ce64f0d5b68fac739abc8c9bf6fe77280716a8a46fe53709b230d549f1df685a68fdab8013fbd1ddb077973e1cfc5aa7cc0b64a9cd98703132f42e29f99909fa4cecf20a358c6ef9c81210bb8cfdd410c45c74cdcd34638931baee2174b81243eb647035c79f70561280ca3d130515f5858001c5e68588bfdd27cbf0fb0681b85e753df71a8d808197be10517ab28e6563c074c9b27b19f1ee906e69ccd9a6e891aa5238411116fef43d1e7f0d557edf9c6d2fcc8b34f25865c316e3075cf859bbe95fd7af00c53cea415e61aea89eeb9ae7b5718ab2b048bac3f31047722a734cfe7a31018453306049b110439bf9e6e590ff1fa2caf19b0e766e70e7e1126f8b7f15c3bf4cad1b6cc03e5cb0a9e9d268b951c9493552e99ef445cf41f1b9b69667f8fdf4b98e8ad356bb1935b4c67df15bf51b52cefd0a0029e425aa3fb45e94c292c01ccb172ca69f19
f35df5563c64ef370fd02c29c693eb734b068b29339aa0dac0addd33aa82de871a8a294f9124ac755b21ad9a0437fac1714d2842d74c9abd6ce6987ce70085b07269f96d5a7e3523881fbeee139973982cf266877bcc791aa03721249e6a314d0403fcfecffaee51fef74a7d2dd68d5f799747fd0013048b3ca05f6427f3fefef2a21a1fc96e97dadb10fc3f06b813cfaaa7a8ff9fe355cb7dbdd5b319d1cc6433c7f177662e8a3d1b21e395f3f394748842abc0e59cc486bf72b162f72f8fdebaa4018d96052c62a63b53ba19f3e3e3ee25c95e512cc884d0a5c126a279efd03934198fa511badb114decfd6f4a7faa0b4fba1e2712309579e7fd8b802811f3c7bee088965dc90322cf436d39c59e19089982eb249165b4fd8f8cafb47310777a497db3205a43454ab5e4e6f5db582888e9f283a88ed03c8956f37b4a7bcf38de6d97d00ee2a1348b0556c9afdb2eafaa1b63e3ba47704f69148eb8a635ba3f151ad6875bc83136d7e4aaaa56594862c1cc1630ea346b14198e77a09fbea0ba727ead4d728024da71c2390f414fbfffea0923c26e4eac8ef558a17dc6c4d8654a6d260e6350582f8df8d7d7a1ba3026211d6eda190080c28e3c38e28a21121381edc17b6d42bc9101e46509c8ba3662e607ac5b398f54f401a6f080945b42559296c4a4f938d23a1f935619d1666cf027bb6e9cdca38bebdca5912d5db90b9feda7399103c801504844e9989a068682dbb8c1401415739cc27f7c6ed9f9f3eac9b2736543dde32f0e22e2664eb8b8dfb97bf334599853414a04447fba4c86716e3db060750473eb8f58540d8b746ff4d31e21f664d1857141461c83604ef5120d9f619058997e701f954e455c81b90535a0340a120ca301d4b518452d34d052eceb0bb75596a9c635b7187bd89a701207aac1848a1d4d81c10a3f100fc6f03259e3768e30a37234aeb446c5174a5cf567b0e491dadc6b0866af1d6631d307343a2e0c3ec4f7627fc9c8b966b907708baad9dad18e71cdf94151489cfdc7a93914f02775c9c16a30150dd60748a82afcfd3b8654984f045ff7f5467febadcaa0b81932c4cb56e0a71dc03b8520f23d93e299681f3dd84eb0c2f9b7d0b819d53136dd366745407819c816299662bdede2109300c4821c62c3c4cf4984d02e98f213d2fba8460463748f36f1a3ba39f691b1185bd772b599fdd37cd3e4259d6837a3b3a6b8443de4b83094a34648fe580dab808a9621be4e7c0fac34f2c86d09234ea36364646541edb7c99c0e20756d594cdf7eb43625c3232d393262e7a7c35e7f9f7ad0673b41c88727d3eefb2e76aa4dbdd720c5b0f2db493d6cecfac17972103e38306a0c0342ed014963bf8a45c900406ef663301b7c0e3179491559c1f30a58175d51a35c4fcb7a6099ecbdb7933248c295c476505f88c67baccc211d755a2349f9b4f07e8248cfcce82a1bd3fbbe26e34bd55c297ed356b2c5b63305bf81a404d561fa69509537f3709aee8ccb48e3387fe6aa3489b0c8a6cde1a5138078d5480a0d644672572ad92aeb4d2eb0ad727fc85bd1f0401a20644f6cc5c9d43"}}}, 0x1082) r1 = epoll_create(0x30a33faa) write$binfmt_elf64(r1, &(0x7f0000000040)={{0x7f, 0x45, 0x4c, 0x46, 0x4, 0x6, 0x9, 0xff, 0xfffffffffffffffa, 0x3, 0x3, 0x81, 0xf6, 0x40, 0x15c, 0x6, 0x8, 0x38, 0x2, 0x4, 0x8000}, [{0x60000000, 0x1, 0x9, 0x1, 0x3, 0x2, 0x9, 0x4}], "d41d3856bc99ddbdca16aaf351d4a0a8a6050ecf01186a8c7034936ab0f13cf8b82f7291685263ec514c62008d52156360ecb31500e29ca55b84337cbe3478cbd6bdd26ceb0895c7593472fe3aab1edd4f148442791cb6059024140da598796dfff761c9210cc0f29e04", ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00']}, 0x6e2) [ 2604.644308][T26080] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:46:38 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000005db3ffffff200000000200000000000000000000000800655800000000"], 0x0) (async, rerun: 32) pipe(&(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) (rerun: 32) write$tun(r0, &(0x7f0000000880)={@void, @val={0x2, 0x84, 0x7, 0xa9, 0x4}, @ipv4=@igmp={{0x1c, 0x4, 0x0, 0x1, 0x1078, 0x67, 0x0, 0x5, 0x2, 0x0, @multicast2, @initdev={0xac, 0x1e, 0x1, 0x0}, {[@noop, @timestamp_addr={0x44, 0x4c, 0x16, 0x1, 0x6, [{@dev={0xac, 0x14, 0x14, 0x22}, 0x80000000}, {@broadcast, 0x3}, {@multicast2, 0x80}, {@empty, 0x8d}, {@broadcast, 0x8}, {@local, 0xfffff800}, {@broadcast, 0x2}, {@remote, 0x2}, {@dev={0xac, 0x14, 0x14, 0x28}, 0x1}]}, @cipso={0x86, 0xd, 0x0, [{0x5, 0x7, "9e4b5cac51"}]}]}}, {0x14, 0x4, 0x0, @loopback, "5b0c8fdfe41179c96b43141636ece85e4a5fedd99ff0984ab3d8f7fd8d94d05c52e0046a5854e4ff23d6185951917720150a6a242feb47d49540c5ebfeaffe945beeebeafeded506fd9006cf1f4c1efdc37bb0513ebb4feaec01136c6255e81260077b733d20eebf99e9be4c93e201313729b220f34a026cfe85d4f9aba785775a8583108e7d9437eac3896b102b85eeee63f244f9eddaad8f885eea8c43147479352009a7f9f9c0086b56eb7632542944ceba49505eab3a8113e87151aeac89e99ded554cb415c75a8abf6a690659ddcf706834903eba59ccf3760d1a4d1c7f4331cd422f1beea2385c323e40ad455c4b9d9f9230fd61e42a61a41dbb434ddfa46bdd732f42ee4fcc155ca6c5b51ac12e3aaf67f866e526e9ad70702a74bbd8abc7f3a0385a1f959af5a72fa75d29345c5e5b54b647cc0b764f43d383c4269c3ef4e96c8163039644becf2a3972a3630f1ca956082bda234f3408a0dfb5ac6dbb500f3b99a477a6f5b82e8734a3568485573bce4e601ecf7c0b523153fbc6aa9298a1b7829ded206e871741d73e0189cedbcbacab2c97449777f7e31baa301538befe8c0634457eaa9846bd6744b273c62c2f90ca6b9a25320455548f560ad6ee37724feab72b83bf0d2db214d7f9faf676ac26f9991a3699e26425bdf24aa77a325899bb2742efc1b9aca65943cb6a80591a57fa71c9dd05d0237720f4845e5eb23a7f3c0539d09e87b1955fc10bb45b6427b47c5290cfb192d33d4165ebaa91840418eaca1893774531a289267055e6ec6d2f798254b85a6ba528a81d33745aec289f447fc83bcef57b56c4b586c5e79ba03b5cad6df5d5dde61948ae52ff6f9ffc8d6881f44667644a3f8dc714f7cca82aae2c31ee2b7fecb6bb1c05ffa8658ac29322335f41a69465a4c2cb1cdbb5fcbdd1a2ca728261337315f0d5d35d77b9c6ca8a803395d49d46162f39a8f390e8103c47561f317fbe04a8135056137a42a06db0b82a330ef051e936918842ffa057669aec242a089407d85a9a4f537f7a863aa48ed104f9224e745143e1390b6d77528696d4d1781fa339e96eb744a0a162c251f431d1495cbc99a9751dc82c18fc8adde740bbe487f903440b4af55249b97469911aeb5971fc3d449a42ba1af9b89556a524e486ee534bf0a5a6b486b32e9d4b68258c158ca9e5d6ee797e11f97f599da564f29d7312379911952b31c40ed07e4bfc3a0a3183accf627081391f06f4f44ea8def4d7d475b448a0bfbebf4963084cc78921271b5e19463a5c07c0de78daef3ed590e967e869e9675ad09f683e35a660b641305eac879339e27a2010cef564a383c9dd3e26ccdca663449f24aae1bffd14eaae46394c69d5dc6ab610eecdcf88a63f6122a5e5fd3c1adaa157da04870c93b220d5de175c231d2f4b2731673bd2687cf105691f815819f809f97d87cc147e6abfdd0b7d9ce27d71cf322d2deb1a63d69fb04cf0b44b0c009f68e641086b00c8113a0dda3d05416bfe6fb30d84c8b3cebe96ba2f8f42bded739274bc9995e59d3394ac8fbb10270da00b290e14462a5d3803cada5a4cd8314b79bb5fd676027a4e0971f8a5c6f08f58798b230e1aadccde6ec511ba0d01053d37f1bf0ce22024257a5d9c249d86ac2cefc2e55c0421f4c64fc47b4cba9449306022f8a32b0ccb0b3f2c9ef0af4ff571dac4eba29c3594284aa65bda92a88276ae7950eaab0be7141dcbeee0d3730c6e4e50dbcfea7c01a069623f4b113a672d4933212d102e3ba406e57001d9f2179773faa476096dc72535c5b3eab99ad7f615b1b29ad1a706c30835a3c874e901094640d4a7fe943d6f68f05cd3af5
dbf113de2e6581f28542789e7de78f64bac580502d266337494d0ea94a7299185bcf1391342de89c2d61c33e7fe11c94d2a64c6190a906b0d05416c7e1b4600b0c870625d1e3fe590861ad1ad3ff82378f02dd556a39f925a8a74514d7d439a183949cce31d3dfa741ec91bb35f547427fbbed28d96e3f0690f77716a0342d9233adac93a2b0ae919ae9eac26b2f5cbcc8ec889fdc85c96c880dd43d93e331be01304425810e7dca66618d41a09cb5cccc8886ba3c7f8c89fe2bc89d1148ea0923355d1579da3580ba31dfe83a979a818fb45a524235f03a118a38d6fced85baa9180d6a8a1042d4b9e25d2bd59eccd34bbe7c40b16551d9a8544e9d4124a2bb98926b32700d1bce84283a8b45931b01a13d0ff0bb26eff5f8977cd6ee41b93d1ed03320a89dff9e5946aae01f0cb46c0d0e423a04f154705b3dc865269562ac15723e24de51f0fd5cde24ce91afdc47180fa9e904838a3bb2f3667c2618b0423175914175e0fa05b6cc4812b8b83850517a9fdda67e5f0c832b5844b42a111619b8d338e272bc98d9ca32400261469e7c34504d84e71344752b1698680ffcdfd170784e9d43f5f1117894f3b7e0523ab398d8f65672a95a984d5dde6d416cd6209ff792aafa919ecc45c1a26f5b1144cbe2217b7b4a1c44dd2f3e279d0630ef1485c689ec74f6ccfad58d02c607d27511b6b45c14a3d3ac174e9cc2d97ec9550aa6a6115f5c16005afa210d7b5e5bd3727d7778ff7ea4dbe88ace6e8006b6c17475c042138c29d3b038f5c3e6fbe0daa6961fed681934dc00ac92f62ecaa651db6e695d53b20bc4dc8bc11291add3f2ccb9e64b6f5e3b34c4535d3a9bdc95a8482bddd84e96760f8e3b81d453a58d7b07b1d5be67d9283342b565829551c0842f750bc3fb9f0a7c6e5659d825c0bef3bc661a63d1f9239cecf08223f4726c5325726a322183401bb61245fa97ea377be2bed5d28896799e98568d7cd7f181d5df50670aca2619fb473a0883c3a493ec820dc8d100ea012286336c71e68cfd54e27885c42c19dbb73ec7e19956f9c2f9cbad25a31fa5d18edacf0310e5f7b5e3b565dc455a927e4a0d94903d5f9f4b85dd8150d290ebd6043c2397324655577f08df92e143250f6f1dc872f8f61a883675dec8128813c63b6de91ff2b16cb689e2b3f099fdbaedfece5dc87894fbade21057bf1ac2b59bb4291b29023734697c8527fc56487b1ccf6d0eeea2cda6de36d5582c98ff3e9c6ea6fb1270260ce29c5b4ad28e5c67677179eda5749f49cddd8261af00c5400f65ba36bb806d886cd475fa8af8c5c2e6806b11bf23c3d0c97462df6b7eb5abbf031f1c9e6c13482c78f173f05bc3729c305ad06946250051e3406f2e0d7929329d269f7ebcbde4cabd956536bb43bd72eafcc8737ed33d2b699942b60abb4f6e50e25550499943c9947a43fbc2e705a6e2f76fc2bb013832797c8e6c22c803fd06976b62fd8ebc656c25c5240bcd327a925a0dc5f69d679a23613a4b0e2b5f79dc8e5be01fff3709accc3c58a4877ed0b100eaebd5fa83d1d830a700054dc157053d5b10a1e76e40a898b6690e66d8c5e6ae5a28a4609317ebea0c90ebc4fd57055a3b7d6ea9746852aca59a302529124ca79ccef96c06e789226b284a978143782e1ce3c43f35ab993bebcf02b2c114ebf17b494d2e418b398e5c35fa46f107bad3c84c4b00698911a040a7aa053bbb4d3ea0f0e21190c3342ad7226824eba5dee5b0630e97f58471685c3319ac83a5a0e38067a47d6cd87713bfd918a4674a6fdb8eec77f14f8040ebf374d78dfb5b1473284fe833d0cfac434026745960943f3984dc9abc831e01c9d3993658d1ff34309fff36a21298f779fea624e5ce64f0d5b68fac739abc8c9bf6fe77280716a8a46fe53709b230d549f1df685a68fdab8013fbd1ddb077973e1cfc5aa7cc0b64a9cd98703132f42e29f99909fa4cecf20a358c6ef9c81210bb8cfdd410c45c74cdcd34638931baee2174b81243eb647035c79f70561280ca3d130515f5858001c5e68588bfdd27cbf0fb0681b85e753df71a8d808197be10517ab28e6563c074c9b27b19f1ee906e69ccd9a6e891aa5238411116fef43d1e7f0d557edf9c6d2fcc8b34f25865c316e3075cf859bbe95fd7af00c53cea415e61aea89eeb9ae7b5718ab2b048bac3f31047722a734cfe7a31018453306049b110439bf9e6e590ff1fa2caf19b0e766e70e7e1126f8b7f15c3bf4cad1b6cc03e5cb0a9e9d268b951c9493552e99ef445cf41f1b9b69667f8fdf4b98e8ad356bb1935b4c67df15bf51b52cefd0a0029e425aa3fb45e94c292c01ccb172ca69f19f35df5563c64ef370fd02c29c693eb734b068b29339aa0dac0addd33aa82de871a8a294f9124ac755b21ad9a0437fac1714d2842d74c9abd6ce6987ce70085b07269f96d5a7e3523881fbeee139973982cf266877bcc791aa03721249e6a314d0403fcfecffaee51fef74a7d2dd68d5f799
747fd0013048b3ca05f6427f3fefef2a21a1fc96e97dadb10fc3f06b813cfaaa7a8ff9fe355cb7dbdd5b319d1cc6433c7f177662e8a3d1b21e395f3f394748842abc0e59cc486bf72b162f72f8fdebaa4018d96052c62a63b53ba19f3e3e3ee25c95e512cc884d0a5c126a279efd03934198fa511badb114decfd6f4a7faa0b4fba1e2712309579e7fd8b802811f3c7bee088965dc90322cf436d39c59e19089982eb249165b4fd8f8cafb47310777a497db3205a43454ab5e4e6f5db582888e9f283a88ed03c8956f37b4a7bcf38de6d97d00ee2a1348b0556c9afdb2eafaa1b63e3ba47704f69148eb8a635ba3f151ad6875bc83136d7e4aaaa56594862c1cc1630ea346b14198e77a09fbea0ba727ead4d728024da71c2390f414fbfffea0923c26e4eac8ef558a17dc6c4d8654a6d260e6350582f8df8d7d7a1ba3026211d6eda190080c28e3c38e28a21121381edc17b6d42bc9101e46509c8ba3662e607ac5b398f54f401a6f080945b42559296c4a4f938d23a1f935619d1666cf027bb6e9cdca38bebdca5912d5db90b9feda7399103c801504844e9989a068682dbb8c1401415739cc27f7c6ed9f9f3eac9b2736543dde32f0e22e2664eb8b8dfb97bf334599853414a04447fba4c86716e3db060750473eb8f58540d8b746ff4d31e21f664d1857141461c83604ef5120d9f619058997e701f954e455c81b90535a0340a120ca301d4b518452d34d052eceb0bb75596a9c635b7187bd89a701207aac1848a1d4d81c10a3f100fc6f03259e3768e30a37234aeb446c5174a5cf567b0e491dadc6b0866af1d6631d307343a2e0c3ec4f7627fc9c8b966b907708baad9dad18e71cdf94151489cfdc7a93914f02775c9c16a30150dd60748a82afcfd3b8654984f045ff7f5467febadcaa0b81932c4cb56e0a71dc03b8520f23d93e299681f3dd84eb0c2f9b7d0b819d53136dd366745407819c816299662bdede2109300c4821c62c3c4cf4984d02e98f213d2fba8460463748f36f1a3ba39f691b1185bd772b599fdd37cd3e4259d6837a3b3a6b8443de4b83094a34648fe580dab808a9621be4e7c0fac34f2c86d09234ea36364646541edb7c99c0e20756d594cdf7eb43625c3232d393262e7a7c35e7f9f7ad0673b41c88727d3eefb2e76aa4dbdd720c5b0f2db493d6cecfac17972103e38306a0c0342ed014963bf8a45c900406ef663301b7c0e3179491559c1f30a58175d51a35c4fcb7a6099ecbdb7933248c295c476505f88c67baccc211d755a2349f9b4f07e8248cfcce82a1bd3fbbe26e34bd55c297ed356b2c5b63305bf81a404d561fa69509537f3709aee8ccb48e3387fe6aa3489b0c8a6cde1a5138078d5480a0d644672572ad92aeb4d2eb0ad727fc85bd1f0401a20644f6cc5c9d43"}}}, 0x1082) r1 = epoll_create(0x30a33faa) write$binfmt_elf64(r1, &(0x7f0000000040)={{0x7f, 0x45, 0x4c, 0x46, 0x4, 0x6, 0x9, 0xff, 0xfffffffffffffffa, 0x3, 0x3, 0x81, 0xf6, 0x40, 0x15c, 0x6, 0x8, 0x38, 0x2, 0x4, 0x8000}, [{0x60000000, 0x1, 0x9, 0x1, 0x3, 0x2, 0x9, 0x4}], "d41d3856bc99ddbdca16aaf351d4a0a8a6050ecf01186a8c7034936ab0f13cf8b82f7291685263ec514c62008d52156360ecb31500e29ca55b84337cbe3478cbd6bdd26ceb0895c7593472fe3aab1edd4f148442791cb6059024140da598796dfff761c9210cc0f29e04", ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00']}, 0x6e2) [ 2604.780967][T26080] bond1214: entered promiscuous mode [ 2604.820717][T26080] 8021q: adding VLAN 0 to HW filter on device bond1214 [ 2604.875523][T26078] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2604.951588][T26078] bond1175: entered promiscuous mode [ 2604.958083][T26078] 8021q: adding VLAN 0 to HW filter on device bond1175 19:46:39 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000005db3ffffff200000000200000000000000000000000800655800000000"], 0x0) (async) pipe(&(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) write$tun(r0, &(0x7f0000000880)={@void, @val={0x2, 0x84, 0x7, 0xa9, 0x4}, @ipv4=@igmp={{0x1c, 0x4, 0x0, 0x1, 0x1078, 0x67, 0x0, 0x5, 0x2, 0x0, @multicast2, @initdev={0xac, 0x1e, 0x1, 0x0}, {[@noop, @timestamp_addr={0x44, 0x4c, 0x16, 0x1, 0x6, [{@dev={0xac, 0x14, 0x14, 0x22}, 0x80000000}, {@broadcast, 0x3}, {@multicast2, 0x80}, {@empty, 0x8d}, {@broadcast, 0x8}, {@local, 0xfffff800}, {@broadcast, 0x2}, {@remote, 0x2}, {@dev={0xac, 0x14, 0x14, 0x28}, 0x1}]}, @cipso={0x86, 0xd, 0x0, [{0x5, 0x7, "9e4b5cac51"}]}]}}, {0x14, 0x4, 0x0, @loopback, "5b0c8fdfe41179c96b43141636ece85e4a5fedd99ff0984ab3d8f7fd8d94d05c52e0046a5854e4ff23d6185951917720150a6a242feb47d49540c5ebfeaffe945beeebeafeded506fd9006cf1f4c1efdc37bb0513ebb4feaec01136c6255e81260077b733d20eebf99e9be4c93e201313729b220f34a026cfe85d4f9aba785775a8583108e7d9437eac3896b102b85eeee63f244f9eddaad8f885eea8c43147479352009a7f9f9c0086b56eb7632542944ceba49505eab3a8113e87151aeac89e99ded554cb415c75a8abf6a690659ddcf706834903eba59ccf3760d1a4d1c7f4331cd422f1beea2385c323e40ad455c4b9d9f9230fd61e42a61a41dbb434ddfa46bdd732f42ee4fcc155ca6c5b51ac12e3aaf67f866e526e9ad70702a74bbd8abc7f3a0385a1f959af5a72fa75d29345c5e5b54b647cc0b764f43d383c4269c3ef4e96c8163039644becf2a3972a3630f1ca956082bda234f3408a0dfb5ac6dbb500f3b99a477a6f5b82e8734a3568485573bce4e601ecf7c0b523153fbc6aa9298a1b7829ded206e871741d73e0189cedbcbacab2c97449777f7e31baa301538befe8c0634457eaa9846bd6744b273c62c2f90ca6b9a25320455548f560ad6ee37724feab72b83bf0d2db214d7f9faf676ac26f9991a3699e26425bdf24aa77a325899bb2742efc1b9aca65943cb6a80591a57fa71c9dd05d0237720f4845e5eb23a7f3c0539d09e87b1955fc10bb45b6427b47c5290cfb192d33d4165ebaa91840418eaca1893774531a289267055e6ec6d2f798254b85a6ba528a81d33745aec289f447fc83bcef57b56c4b586c5e79ba03b5cad6df5d5dde61948ae52ff6f9ffc8d6881f44667644a3f8dc714f7cca82aae2c31ee2b7fecb6bb1c05ffa8658ac29322335f41a69465a4c2cb1cdbb5fcbdd1a2ca728261337315f0d5d35d77b9c6ca8a803395d49d46162f39a8f390e8103c47561f317fbe04a8135056137a42a06db0b82a330ef051e936918842ffa057669aec242a089407d85a9a4f537f7a863aa48ed104f9224e745143e1390b6d77528696d4d1781fa339e96eb744a0a162c251f431d1495cbc99a9751dc82c18fc8adde740bbe487f903440b4af55249b97469911aeb5971fc3d449a42ba1af9b89556a524e486ee534bf0a5a6b486b32e9d4b68258c158ca9e5d6ee797e11f97f599da564f29d7312379911952b31c40ed07e4bfc3a0a3183accf627081391f06f4f44ea8def4d7d475b448a0bfbebf4963084cc78921271b5e19463a5c07c0de78daef3ed590e967e869e9675ad09f683e35a660b641305eac879339e27a2010cef564a383c9dd3e26ccdca663449f24aae1bffd14eaae46394c69d5dc6ab610eecdcf88a63f6122a5e5fd3c1adaa157da04870c93b220d5de175c231d2f4b2731673bd2687cf105691f815819f809f97d87cc147e6abfdd0b7d9ce27d71cf322d2deb1a63d69fb04cf0b44b0c009f68e641086b00c8113a0dda3d05416bfe6fb30d84c8b3cebe96ba2f8f42bded739274bc9995e59d3394ac8fbb10270da00b290e14462a5d3803cada5a4cd8314b79bb5fd676027a4e0971f8a5c6f08f58798b230e1aadccde6ec511ba0d01053d37f1bf0ce22024257a5d9c249d86ac2cefc2e55c0421f4c64fc47b4cba9449306022f8a32b0ccb0b3f2c9ef0af4ff571dac4eba29c3594284aa65bda92a88276ae7950eaab0be7141dcbeee0d3730c6e4e50dbcfea7c01a069623f4b113a672d4933212d102e
3ba406e57001d9f2179773faa476096dc72535c5b3eab99ad7f615b1b29ad1a706c30835a3c874e901094640d4a7fe943d6f68f05cd3af5dbf113de2e6581f28542789e7de78f64bac580502d266337494d0ea94a7299185bcf1391342de89c2d61c33e7fe11c94d2a64c6190a906b0d05416c7e1b4600b0c870625d1e3fe590861ad1ad3ff82378f02dd556a39f925a8a74514d7d439a183949cce31d3dfa741ec91bb35f547427fbbed28d96e3f0690f77716a0342d9233adac93a2b0ae919ae9eac26b2f5cbcc8ec889fdc85c96c880dd43d93e331be01304425810e7dca66618d41a09cb5cccc8886ba3c7f8c89fe2bc89d1148ea0923355d1579da3580ba31dfe83a979a818fb45a524235f03a118a38d6fced85baa9180d6a8a1042d4b9e25d2bd59eccd34bbe7c40b16551d9a8544e9d4124a2bb98926b32700d1bce84283a8b45931b01a13d0ff0bb26eff5f8977cd6ee41b93d1ed03320a89dff9e5946aae01f0cb46c0d0e423a04f154705b3dc865269562ac15723e24de51f0fd5cde24ce91afdc47180fa9e904838a3bb2f3667c2618b0423175914175e0fa05b6cc4812b8b83850517a9fdda67e5f0c832b5844b42a111619b8d338e272bc98d9ca32400261469e7c34504d84e71344752b1698680ffcdfd170784e9d43f5f1117894f3b7e0523ab398d8f65672a95a984d5dde6d416cd6209ff792aafa919ecc45c1a26f5b1144cbe2217b7b4a1c44dd2f3e279d0630ef1485c689ec74f6ccfad58d02c607d27511b6b45c14a3d3ac174e9cc2d97ec9550aa6a6115f5c16005afa210d7b5e5bd3727d7778ff7ea4dbe88ace6e8006b6c17475c042138c29d3b038f5c3e6fbe0daa6961fed681934dc00ac92f62ecaa651db6e695d53b20bc4dc8bc11291add3f2ccb9e64b6f5e3b34c4535d3a9bdc95a8482bddd84e96760f8e3b81d453a58d7b07b1d5be67d9283342b565829551c0842f750bc3fb9f0a7c6e5659d825c0bef3bc661a63d1f9239cecf08223f4726c5325726a322183401bb61245fa97ea377be2bed5d28896799e98568d7cd7f181d5df50670aca2619fb473a0883c3a493ec820dc8d100ea012286336c71e68cfd54e27885c42c19dbb73ec7e19956f9c2f9cbad25a31fa5d18edacf0310e5f7b5e3b565dc455a927e4a0d94903d5f9f4b85dd8150d290ebd6043c2397324655577f08df92e143250f6f1dc872f8f61a883675dec8128813c63b6de91ff2b16cb689e2b3f099fdbaedfece5dc87894fbade21057bf1ac2b59bb4291b29023734697c8527fc56487b1ccf6d0eeea2cda6de36d5582c98ff3e9c6ea6fb1270260ce29c5b4ad28e5c67677179eda5749f49cddd8261af00c5400f65ba36bb806d886cd475fa8af8c5c2e6806b11bf23c3d0c97462df6b7eb5abbf031f1c9e6c13482c78f173f05bc3729c305ad06946250051e3406f2e0d7929329d269f7ebcbde4cabd956536bb43bd72eafcc8737ed33d2b699942b60abb4f6e50e25550499943c9947a43fbc2e705a6e2f76fc2bb013832797c8e6c22c803fd06976b62fd8ebc656c25c5240bcd327a925a0dc5f69d679a23613a4b0e2b5f79dc8e5be01fff3709accc3c58a4877ed0b100eaebd5fa83d1d830a700054dc157053d5b10a1e76e40a898b6690e66d8c5e6ae5a28a4609317ebea0c90ebc4fd57055a3b7d6ea9746852aca59a302529124ca79ccef96c06e789226b284a978143782e1ce3c43f35ab993bebcf02b2c114ebf17b494d2e418b398e5c35fa46f107bad3c84c4b00698911a040a7aa053bbb4d3ea0f0e21190c3342ad7226824eba5dee5b0630e97f58471685c3319ac83a5a0e38067a47d6cd87713bfd918a4674a6fdb8eec77f14f8040ebf374d78dfb5b1473284fe833d0cfac434026745960943f3984dc9abc831e01c9d3993658d1ff34309fff36a21298f779fea624e5ce64f0d5b68fac739abc8c9bf6fe77280716a8a46fe53709b230d549f1df685a68fdab8013fbd1ddb077973e1cfc5aa7cc0b64a9cd98703132f42e29f99909fa4cecf20a358c6ef9c81210bb8cfdd410c45c74cdcd34638931baee2174b81243eb647035c79f70561280ca3d130515f5858001c5e68588bfdd27cbf0fb0681b85e753df71a8d808197be10517ab28e6563c074c9b27b19f1ee906e69ccd9a6e891aa5238411116fef43d1e7f0d557edf9c6d2fcc8b34f25865c316e3075cf859bbe95fd7af00c53cea415e61aea89eeb9ae7b5718ab2b048bac3f31047722a734cfe7a31018453306049b110439bf9e6e590ff1fa2caf19b0e766e70e7e1126f8b7f15c3bf4cad1b6cc03e5cb0a9e9d268b951c9493552e99ef445cf41f1b9b69667f8fdf4b98e8ad356bb1935b4c67df15bf51b52cefd0a0029e425aa3fb45e94c292c01ccb172ca69f19f35df5563c64ef370fd02c29c693eb734b068b29339aa0dac0addd33aa82de871a8a294f9124ac755b21ad9a0437fac1714d2842d74c9abd6ce6
987ce70085b07269f96d5a7e3523881fbeee139973982cf266877bcc791aa03721249e6a314d0403fcfecffaee51fef74a7d2dd68d5f799747fd0013048b3ca05f6427f3fefef2a21a1fc96e97dadb10fc3f06b813cfaaa7a8ff9fe355cb7dbdd5b319d1cc6433c7f177662e8a3d1b21e395f3f394748842abc0e59cc486bf72b162f72f8fdebaa4018d96052c62a63b53ba19f3e3e3ee25c95e512cc884d0a5c126a279efd03934198fa511badb114decfd6f4a7faa0b4fba1e2712309579e7fd8b802811f3c7bee088965dc90322cf436d39c59e19089982eb249165b4fd8f8cafb47310777a497db3205a43454ab5e4e6f5db582888e9f283a88ed03c8956f37b4a7bcf38de6d97d00ee2a1348b0556c9afdb2eafaa1b63e3ba47704f69148eb8a635ba3f151ad6875bc83136d7e4aaaa56594862c1cc1630ea346b14198e77a09fbea0ba727ead4d728024da71c2390f414fbfffea0923c26e4eac8ef558a17dc6c4d8654a6d260e6350582f8df8d7d7a1ba3026211d6eda190080c28e3c38e28a21121381edc17b6d42bc9101e46509c8ba3662e607ac5b398f54f401a6f080945b42559296c4a4f938d23a1f935619d1666cf027bb6e9cdca38bebdca5912d5db90b9feda7399103c801504844e9989a068682dbb8c1401415739cc27f7c6ed9f9f3eac9b2736543dde32f0e22e2664eb8b8dfb97bf334599853414a04447fba4c86716e3db060750473eb8f58540d8b746ff4d31e21f664d1857141461c83604ef5120d9f619058997e701f954e455c81b90535a0340a120ca301d4b518452d34d052eceb0bb75596a9c635b7187bd89a701207aac1848a1d4d81c10a3f100fc6f03259e3768e30a37234aeb446c5174a5cf567b0e491dadc6b0866af1d6631d307343a2e0c3ec4f7627fc9c8b966b907708baad9dad18e71cdf94151489cfdc7a93914f02775c9c16a30150dd60748a82afcfd3b8654984f045ff7f5467febadcaa0b81932c4cb56e0a71dc03b8520f23d93e299681f3dd84eb0c2f9b7d0b819d53136dd366745407819c816299662bdede2109300c4821c62c3c4cf4984d02e98f213d2fba8460463748f36f1a3ba39f691b1185bd772b599fdd37cd3e4259d6837a3b3a6b8443de4b83094a34648fe580dab808a9621be4e7c0fac34f2c86d09234ea36364646541edb7c99c0e20756d594cdf7eb43625c3232d393262e7a7c35e7f9f7ad0673b41c88727d3eefb2e76aa4dbdd720c5b0f2db493d6cecfac17972103e38306a0c0342ed014963bf8a45c900406ef663301b7c0e3179491559c1f30a58175d51a35c4fcb7a6099ecbdb7933248c295c476505f88c67baccc211d755a2349f9b4f07e8248cfcce82a1bd3fbbe26e34bd55c297ed356b2c5b63305bf81a404d561fa69509537f3709aee8ccb48e3387fe6aa3489b0c8a6cde1a5138078d5480a0d644672572ad92aeb4d2eb0ad727fc85bd1f0401a20644f6cc5c9d43"}}}, 0x1082) (async) r1 = epoll_create(0x30a33faa) write$binfmt_elf64(r1, &(0x7f0000000040)={{0x7f, 0x45, 0x4c, 0x46, 0x4, 0x6, 0x9, 0xff, 0xfffffffffffffffa, 0x3, 0x3, 0x81, 0xf6, 0x40, 0x15c, 0x6, 0x8, 0x38, 0x2, 0x4, 0x8000}, [{0x60000000, 0x1, 0x9, 0x1, 0x3, 0x2, 0x9, 0x4}], "d41d3856bc99ddbdca16aaf351d4a0a8a6050ecf01186a8c7034936ab0f13cf8b82f7291685263ec514c62008d52156360ecb31500e29ca55b84337cbe3478cbd6bdd26ceb0895c7593472fe3aab1edd4f148442791cb6059024140da598796dfff761c9210cc0f29e04", ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00']}, 0x6e2) 19:46:39 executing program 3: syz_emit_ethernet(0x66, &(0x7f00000000c0)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b00000000000008e3ff0086dd080088be00000000100000000100000000000000080022eb000000002000000002000000000000000000000008006558000000005db7bf77179b330e7098fb3c0902dd3da00ec62e3b8c1684678279500c15391a2ee33f53cce9976aa9ad194bd17c849fd56a3d526bf310d013acf3b58573296a8f55cb3895ec30d1182d75a5a40a6696ea2e2f37135599ee6d394a87bbb3be1a797c2184950071ef45d13fc03f8077e328fc14163c8ea1442c231cfafd0d9fcaedadffec6404e60b7728726f1831b7f4a64d5ceb703989a27748e0cf2fdffa16541e08955682a924c139fc271b0e5238030cf91b202fa15717d63b405c0a5d3e"], 0x0) syz_emit_ethernet(0x1e, &(0x7f0000000040)=ANY=[@ANYBLOB="aaaaaaaaaa38ff010000000000000091bda5000000000000000000"], &(0x7f0000000080)={0x0, 0x1, [0xdbe, 0x66e, 0xde4, 0x4c2]}) 
write$tun(0xffffffffffffffff, &(0x7f0000000200)={@val={0x0, 0x1}, @void, @mpls={[{0x2900, 0x0, 0x1}, {0xffe01}, {0x86fe, 0x0, 0x1}, {0x8, 0x0, 0x1}, {0x1ff}, {0xc000}], @llc={@snap={0xab, 0xff, "02a6", "381129", 0xa00, "9425b4ab948f03299e84c12b61d3c7"}}}}, 0x34) syz_emit_ethernet(0x1016, &(0x7f0000000880)=ANY=[@ANYBLOB="000000000000aaaaaaaaaa3491002d0081000e00886cff3704148a7e3f2d4b2a952b7e4df4f656c5b3f8f405a1359da0d9694fd27b1e9f43295edec04c4558fee08c030bea58a4b2677c7156285b8acbf8eeac5b1941386fc05e92a0222c03dfa4020338a758994f9a6e3eb6908ac43dcbc77cc351de9759ed7873ddf6dc99cb43c01f43c80bc865470ab36b35e7f2f20d962f5d67ac9e58910fa505e88b0c8f85eb3ca24b0d5c2b59f29c908a0b9d746a36faf151277b1af0fd759da8e061d78a1a053200343ac109381fc3405f8ded0c7941d0e3495c4c0acb1abfdbd2c1c9758ab5c54c31b6bb84749db5072766be1a13eda10cf6b38bc048b265a5996b2e34fdf926b741f351b5ecea7cf1ccb00dc21594fdcf523622fc0f520d31563460fa1a0465637230cb94a257192ad527436bf44eab1e4bd50187b736fb55dde1791d84dccaa3b3f3a412bb7ae1afa92c9a4cb556f22c12935cc733a66674a42b564562adcc1eada6e3de2d18027421688f80ba2abb4c12350ec963d508cfee86e20894f766fe0522765df5bc0fd3d9d83e0e54e627bbf4a2b2a2b2fdd8b0ba8bc2c4ec16afb3cf162756633be6c564d4dd5feaff6e0d202b9582168b7a4d54224867bc0b9b9af82d89e9f116e02a1814f5423a0c422ab963ad53718b82af92acefee99cc5633433610620aadb7653756f54633be2e9122ee89276f7d337d605cd9bd54be0b7e5c0b37cd3965696b35be803bd9957393b639541bd6f8ab6f8fd1412a06fb0ed77b9fc181da7cf011a50827288bfc09c0bcc649712f683129c948a526b9810c55fa2351ff5e294868196d96d636b1b1ded917a6870158c6f1e627da08d7d716799a3db8a4fd3dff15177f415808c0db061de5abf868ab572fbf4cfebaff071eb1a2d7e906bd69763d6c8e5f4c576773849d494bd43cb508542bd8b8cb1160d0805fa60a2f140825333b8584b170472ff0eb3ed785e9035e12c94378e06323a6c7a66fc503c1cc1c041192ed0fe3805d7932511b015dcf0dbdc4e358b5617dd44df21fabb706ddc16aa185ba861df73403d29cc720070f964272c6dce082d60a3ced4e487bc400083f1ad5c610b7460f33f976fb3543d70d49cc0d489b410b96a7ce77428d3a9a390bfc7dddbb6bd7df5834c8a98d9c9a58c3d36f7db2de51483b76774bbe0fe57059908763b9135a8e030886e49ab1c3cee2d448a193d05d68f3d22bcc92727d4993a1f4e9f09dd4a668afbd3ac7f70a6e5549777a5c5d6d234d36e97a4d5081d62c5702b7f209aa256a86a35fa03cb8be531fe6ace428220b2677415b074e1e65b181c6246d22048ce314bc6d3ed57b7dca5f91cbe75e3c65dd97cf1d049deb78807a58c4320a1e6fb8fcb43829d9c99b0a8af49b82064a7e639ca8af10bd6ca2e69503f2ac9b8939c8f0a5c15db4de8d7bf56449a69ccdaa031af9f22a5bedd4fb164f26c7dff1e32a6783d15e073e4d5c61623c821c52e108618260ed4c2402255ec24f82a884364f0b7bcab318bd4accb7ec6b7787c20b228e442ab33b53008f1ac8b5b1710de26b2aff9fbb3c4362d0b56944b7771fd9ea0e40541eff1a2e04a055837b2943f01e2c2746a28a1decd964e31ef0ecabab2107713f1444e71c145e71a00658662de014805baab682ab9f75fd29707316ff208ed8a2bdc98521d7c1b6bde395da01e43d1ec95d5f150f4bd95ccffb45387d2620cc606b7bd5909449a0ead3a7394383c243c885b0f51b37cc7813c33a29cf1290e6baf6924f070899ea41f3ea7ace1bade77decc6c6c1cdbe4ece040c6c09996ffad5d666da12c8017a15baedcbbe9642f17fdb2ebaff9d3f8f3089ea85b889992d86922b23435fef4f71200279cc135a7c84bebc59e1c609b677b1007cd2296eb632d090da27ec527272896ebcc5ddbe556431c3d1967256f6a937877b49d0ba727b64bb44827da57b7afcfe8752d361097598f50d63fb12e9f1c587c0eabe966eb4803dc428e957429e89c8cb04dfeab0fedbaf9caaf083db237f57a1a2a3575f3f6a8f26db3b27d8b63ccb021908a7727354809867f51f62498d40fecdd30f09b03bdaa9a9b03fc34c6cd0eba12de45a935776ea2ada0cc4fd4740cb877a6d3d9018078621723a846a1c6e5f9356b6ef86f6af50a327ee45cf6a15f5a0465d08eecb0a2b784294e3bbf008984f1d55dd38ab51e3a4715acd6173f753a855655ff61dc7a63a79f603c23aa75a3eb53dd2575f5cc72dce538ad2786a80dd68bc098d422b5e4077c9fc621
33a3a45c2cf3014e7ae1e551d509c180f5619bfe0d7b180c01e98ccadc27cbe88296ba5b1538cd182a309a0e4dfe2fe964fe08fb355c2f7c1bcf732c1bd616c994abd0ff6396022cd3d8790e0d6c3b9264bd4ee922414643a2eaf2cc4b9388d948c3a0dcca3f71b7baef1f791a56c902d5ba5ed2849fb90726eae9c084aa3de73d0301cfafa56ac3b59c7830e945eb438187b0da89e236fd83f93fe884badca55ae6dab9bfac8e7479bb226967063f5de326e80212e59ac507cd02864ddab28654416dadd6426654ee11977519f63df012985bf70e9b548b480e277ee1144a008ad3ad66b982ee4a612e2de07f209c7fc63776cce9bf8c9e9af64cfc6b9957dbcf2399c8a5039f65409ec2cfec598006f7f43d3b4bda4067ac2c1e75797655606da14af01d1578caf7b1e360c3cf206a90ee2f43ededa1ffca5e6689034510300101cf3e930da80b31b9ebe3c7734136d161ce77eac6c31b3a76573e253484c28e211b2ace88945270bc52282bb62701510d60ac73e1e5cd6bf306f9d0c20698eb4ae784f31be948580d2aaaef23c696533baefb268afd6e9ff82b1da5032e77c031c5bf7599dff1f75e98382646f2c22f7501103fb3933766e77ee4e6f26a0c0e14c762793a1ad2de9e3c548c72738686312eaeae7156240f78acab715ebcfafeccab3a4cb7d0dbe9eaabc84b30f8ce0d334b444a82946ec2d3455188b5d26355cf7cd23bd22ef783a538bbe0c88ff93b29380fa933e05007f812fc0e281880808cf612891f64f585e66324124f9d94d59836a1560c8745456b5982c0fff2dae034d973211353a7bc3d3b5bc1a06ca6e81ee85b0f3bd1cab125b158838da474da75bdfb6afc154e0d1fd82c37e6b58a341a2f1895d684964bfd872e5ff79eb9980b5875e5310a0ffe060154e800bb7b96e4461cd112980517db29367105ed5b42243b029138a7d14719063879d0a593351f1ae7f0bbdd72ecfa1ae3cbaf204b95d041db5fb16b5e168660894baff54fc65ce74bf2324141cdc85ec10eca59b0999800ce5409451bb087a985e15c924e72afa7351c73b564c519d3308317e653b4e7fd0af0c9071e29e54cbc57c803b58a3dc19fc7c6a736b4863b8686a34ee43aa8c71e420f88ce71af7d3ce24c182b28ec8404bdedaaacf3ad5554486c501868830ef0343456f873634bc8a36d7fb9025701b332bda1fe9e0939a6e5198c3bda81e78d5e9e91125983f5adb20384ac7907a0f9c5ab63b16a1b9551a29d57233b7f1a3c3e1ea17c482f5e4afd80622f97b75f46638720c46e648f049a0840cc7d5622d92a892dc9bef3cef0b9b23cbebd00ac25d96443e257ae472b6d0e9323b44039e960851e5e3f3f2236a9e2a07148f68cc5cd357ed7512d70415b4248e09abde1da67eb7aa94ee8634de4c550acec389337494177d503db954e991fc45280386f2ccefc75acb36d51f98785da15c8c6082cbe94006e0598da23aeb1d411ca41a04ad9e2a975c4f0eab7f90a39c5155947659fa82398471e4aee8baf3706f7c416c0bc8eb99497af659085313a5d5ba3d74972d8a319dbb8b4d48e75c0c5888f393d9561a52593d6b51a6f7502c755028180b14efe991a7c8003c5ca9415297f2a3a192a90e997dd3fa7a130223cbb62206048b563e6e7a1a22f4ae05b4264613a8059c72eed01a9b050b506af38eb55b13204fc53eeff605c428e6e3717246afc0711eea250f23fda8f122288c74dd358dfcff52b5a5bc65d723e38ac488c917c61f6c7456cdd5282685206bddfd4d0cab2ecfff08284782ed453d22092d1274b246ff122beee0a140f2f3ea2f971ab206fd8ac6f5296ce511d98a7c17fe61a9157de29e216f4762215d394019a7eb19da27d1b24673e23c1e97252bac36d2617ab7ed4b3e41fe0a42c3b1deda9b69f09334461f38eae6adff8b87b84ef4548bec81279f6cd0f18751cac823b5af34a2ef1043c9859bbdf5d4bbdbe594d601c20c3904a0d603887b9624fddf5bd01bd73d817c3d79486fd217813f1588abfca5254949fbdd75a337a558cbaa96da976247d3e0bb5cce63b40a56a413289bac2e31fc0a61ebd62e50e1e56130ab01ed564c1a275a00c6c87917d1c6945b446581f96ee71717389ba014af29068440febe7dff9278ba3d16c671c7092d33b35f7a9f54c1ac9af864378d8969080dda37d4d2498b3e38a6d3535711db2625749ab8490f4f6faae81b904fb0049290a6662d05e8d5d01334474b77d582c749c48508bf58df7b4734f9161b6bed30fa70bf05d60fce6b9c7cfa53f4d3c0f706de225bbc83fba0ea6ec0064577e82815da54d757d6c12e7fc65d94c13de70a405aca384f8d3adb26d4a96875655e7837ea6b781b07bc3d2448e8366450176e4ed81287e2066d1587f33fc9db6b94e5eb43e8703d53d37b989901f07f8db5d8021ecd2a3c9ded32159a3ab45b02a7d08878c3c53945ebf4f2929e2e8d3e8fa28342f490f8dfc435f37d99c3f32bd4e95f1267
6b8585c426859152bd95c3b4dd2dca53a6c31a728148d5d81bd1c132fdbbe90e729bee19ff9b91fe295f746cda7b548af845f133b04d5ba37b447939689a03a927b548610515664ef3b4333ac81a5abd2d4d725c2c9ee95c6ed098415fc496d131378e57e8a48eb6e75c588df75e0ce4724cd621fa5a7d1d3b45e8bdef8e20902859474af0ca5754d1e0d0f3ff26768469553974e29ec6297ef1fa7d01b22b73a4602140804f0811c8009a900bae094ce52f0852fa7f23522bfa162ff209b8c9a6bc2825a5006ea13e546223e3d940ced5f165f2404b892052bb48b16b8836c1bd3b7f6773cdd38f0f7036e83ca68a3ad0fe3ee6c2f87b0e35538b1deb739e193111741d3e7e6350b14533415c955bb88db5cf80a29b28c8f816616d4bb8c42d9a7aa3d6ce4fab89e37275d55a986717406fa54f04d6cae2df0fc0a417dc68abd6136d4b347716d5bb4d0b0d95ad0084a3969c5f793db00762a2dd3abe0818183b2cdfea1769b4b0ce35dac5741324117085f83af2e108c64470c5609bd196e0b7bbd71459a02dd0744d210ee36329d9d937a8b7f4d9e5e8a382e46284a68da389512a0a8817e612a15474c2c7c6373432fc3f9d87b5491f2c33edcb8a7b6d0f23f4fd73ee777a7647feba0d35d12b138629b7c91014c074e7b63d8b39be2e1726acb3937207a91ee290a252f1c5526f104b765b8d48773b0474ceae1f26120355d7ba7b0b0ae0b172866af96bc38525fc377f8aa432be2c2c1cda192ca07eccf5e0884a183c9d194885cc396f6a85ef693867d6ec688a60e5e51a465ed35c2c51855b99429775f96f1d9e63275ad0ff07b6d7a33fb110dac8e621f56da8146b0a285ac3e50a906e9c2557bb67895cbfdc559e6133ed42d0828f68d7815d2297ad5f9ff8d683df3225b8dda0dcf05c8e26a8ee8862b543a915ee868aa9e70d508e2299aecc5a27d89cd6bdad7ddf64baf1fbae56a8727355e23995d3875d98696cc1542841166c307b84c1e529965fe05bf916f8"], &(0x7f0000000000)={0x0, 0x4, [0x5f8, 0x429, 0xaa2, 0x612]}) [ 2605.184286][T26084] bond1099: (slave bridge1029): making interface the new active one [ 2605.216141][T26084] bridge1029: entered promiscuous mode [ 2605.285365][T26084] bond1099: (slave bridge1029): Enslaving as an active interface with an up link 19:46:39 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x5a8d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2605.472663][T26085] bond1214: (slave bridge1144): making interface the new active one [ 2605.495249][T26085] bridge1144: entered promiscuous mode [ 2605.507433][T26085] bond1214: (slave bridge1144): Enslaving as an active interface with an up link [ 2605.543979][ T27] audit: type=1800 audit(1690919199.579:1864): pid=26086 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=collect_data cause=failed(directio) comm="syz-executor.2" name="cgroup.controllers" dev="sda1" ino=1955 res=0 errno=0 [ 2605.636148][T26087] bond1175: (slave bridge1078): making interface the new active one [ 2605.645778][T26087] bridge1078: entered promiscuous mode [ 2605.657503][T26087] bond1175: (slave bridge1078): Enslaving as an active interface with an up link [ 2605.713386][T26112] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2605.808083][T26112] bond1100: entered promiscuous mode [ 2605.824602][T26112] 8021q: adding VLAN 0 to HW filter on device bond1100 [ 2606.027003][T26113] bond1100: (slave bridge1030): making interface the new active one [ 2606.074187][T26113] bridge1030: entered promiscuous mode [ 2606.099859][T26113] bond1100: (slave bridge1030): Enslaving as an active interface with an up link 19:46:42 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000280)='cgroup.controllers\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cpuset.effective_cpus\x00', 0x275a, 0x0) ioctl$FS_IOC_SETFLAGS(r0, 0x40086602, &(0x7f0000000100)) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000000080)={0x288000c, r0}) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) write$binfmt_misc(r2, &(0x7f0000000500)={'syz1', "4a7547aff0800d1fa75912a4dc682001e220889a5d16a1043e6b38b6b59622b53384762ba177dc8ebdd13f37c53db1e31c801e6a256c9ca3e9ffe2a3f4d1e9f48bdf5e95640125b1bc2e35860dc8c77195db9e5b8f58c741f51b97b614f9d8558e20af5c5c49c6a5407ae93fd808435118b4778b03294d51ed4af0b12388972e8def2a78241f9268e913a1caa3938a997f37a3b4ed07a6ebf9e34d3e7724a30de080e6f9f7826feab92d3e89020fff67440ec95f406abb6943a4774fe2eea59128a7af9a035943294fab02547887a0e569ed46dc570e3dfe1490b3d89e5152a992e3f263"}, 0xe8) r5 = openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.io_wait_time_recursive\x00', 0x0, 0x0) r6 = openat$cgroup_ro(r5, &(0x7f0000000180)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r7, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x100000b, 0x28011, r6, 0x0) r8 = bpf$ITER_CREATE(0x21, &(0x7f00000001c0)={r7}, 0x8) getsockopt$inet_IP_XFRM_POLICY(r1, 0x0, 0x11, &(0x7f00000002c0)={{{@in=@multicast1, @in6=@empty, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}}}, &(0x7f0000000240)=0xe8) r10 = gettid() r11 = bpf$PROG_LOAD(0x5, &(0x7f0000000080)={0x11, 0x8, 
&(0x7f0000003500)=ANY=[@ANYBLOB="620af8ffa1dc0021bfa100000000000007010000f8ffffffb702000007000000bd120000000000008500000010000000b70000000000000095000000000000003faf4f2aa3d9b18ed812a2e2c49e8020a6f4e0e4a9446c7670568982b4e020f698393aa0f3881f9c24561f1b2607995daa56f151905ea23c22624c9f87f9793f50bb546040677b0c5077da80fb982c1e9400e693146cea484a415b76966118b64b751a0f241b072e90080008002d75593a286cecc93e64c227c95aa0b784625704f07372c29184ff7f4a7c0000070000006056feb4cc664c0af9360a1f7a5e6b607130c89f18c0c1088d8b8588d72ec29c48b45e0000000000000401d01aa27ae8b09e00e79ab20b0b8ed8fb7a68af2ad0810000000000006fa03c6468978089b302d7ff6023cdcedb5e0125ebbcebdde510cb2364149215108337719acd97cfa107d40224edc5465ad32b77a74e802a0dc6bf25cca242bc6099ad2300000480006ef6c1ff0900000000000010c63a949e8b7955394ffaff03000000000000ab87b1bfeda7be586602d985430cea080000000000000026abfb0767192361448279b05d96a703a660581eecdbf5bcd3de227a167ca17a0faf60fd6ad9b97aa5fa68480366c9c6fd6fa5043aa3926b81e3b59c9b081d6a08000000ea2b1a52496dfcaf99431412fd134a996382a1a04d5bb924cfe5f3185418d605ffff9c4d2ec7c32f2095e63c80aff9fa740b6c7632d5933a1c1fa5605bd7603f2ba2a790d62d6faec2fed44da4928b30142ba1fde5c5d50b83bae645ffa4997da9c77af4c0cb97fca585ec6bf58351d578be00d952aab9c71764b0a8a7583c90b3433b809bdb9fbd48fc877505ebf6c9d13330ca006bce1a84521f14518c9b476fccbd6c712016219848624b87cec2dbe98223d8d9e86c5ea06d108d8f80a0eb4fa39f6b5c02e6d6d90756ff578f57000000009700cf0b4b8bc229413300000000000000000003000000000000000000000000001000000000559711e6e8fcffffffffffffffb2d02edc3e01dd271c896249ed85b980680b09000000000f0000169cdcacc413b48dafb7a2c8cb482bac0ac502d9ba96ffffffd897ef3b7cda42f83d53046da21b40216e14ba2d6af8656b01e17addaedab25b30002abbba7fa725f38400be7c1f001b2cd317902f19e385be9e48dccf1f9f3282830689da6b53b263339863297771d74732d400003341bf4a00fc9fec2271ff01589646efd1cf870cd7bb2366fde4a594290c405ff870ce5dfd3467decb05cfd9fcb32c8ed1dbd9f70a64c108285e71b5565b1768ee58969c41595229df17bcad70fb4021428ce970275d13b78249788f11f761038b75d4fe32b561d46ea3abe0fa7956488bef241875f3b4b6ab7929a57affe760e797724f4fce1093b62d7e8c7123d890decacec55bf404e4e1f74b7eed82571be54c72d978cf906df0042e36acd37d7f9e109f2c06f815312e0cfe222a06f56dd022c074eb8a322fb0bf47c0a8d154b405c37feaf3dd95f6ef2acd1fe582786105c70600000000000000b7561301bb997316dbf17866fb84d4173731efe895ff2e1c5560926e90109b598502d3e959efc71f665c542c9062ece84c99a061887a20639b41c8c12ee86c50804042b3eac1f870b136345cf67ca3fb5aac518a75f9e7d7101da841735e186c489b3a06fb99e0347f23a054de2f4d92d6bd72ee2c9fdc75aaaf1e3e483b4ad05573af403269b4a39ce40293947d9a631bcbf3583784acbda216550d7aec6b79e30cbd128f91e358c3b377327ac9ecc34f24c9ae153ec60ac0694da85bff9f5f4df90400000000000000d6b2c5ea1393fdf24285bf16b99c9cc0ad1857216f1a985f369191ae954febb3df464bfe0f7f3ee9afe7befb89d2777399f5874c553aeb3729cffe86e669261192899d4562db0e22d564ae09bb6d163118e401e024fd452277c3887d6116c6cc9d8046c216c1f895778cb26e22a2a998de44aeadea2a40da8daccf080842a486721737390cbf3a74cb2003016f154772f514216bdf57d2a40d40b51ab67903008485b3b8a8c9ae3d14f93100c2e0893862eef552fcde2981f48c482bde8a168c3f5db2fea6f26e4a4304e50c349f4f9ecee27defc93871c5f99a3594191e104d417e60fc3541a2c905a1a95e9571bf38ae1981c4238ecaee6f75cd0a6881bd1517a8250df98674152f94e32409e2a3bce109b6000000000000a1fec9000000d694210d7560eb92d6a97a27602b81f76386f1535bef1497f92186086e29c6bc5a1fad6ec9a31137ab79a404abde7750898b59270bb29b81367ac91bd627e87306703be8672d70d1ab57075228a9f46ed9bd1f00fb8191bbab2dc591dda61f0868afc4294859323e7a45319f18101288a0268893373750d1a8fe64680b0a3fc22dd704e4214d00000000d6c98cd1a9fbe1e7d58c08acaf30065b928a31d2eca55f74a23641f6
1f2d5b308cf0d031b0c7f0ce21d69993e9960ff5f76015e6009756237badf4e7965bbe2777e808fcba821a00e8c5c39609ff854356cb490000000000c1fee30a3f7a85d1b29e58c77685efc0ceb1c8e5729c66018d169fc03aa188546b3ad2a182068e1e3a0e2505bc7f41019645466ac96e0d0b3bc19faa5449209b085f3c334b47f067bbab40743b2a428f1da1f68df75cf43f8ecc8d3726602111b40e761fd21081920382f14d12ca3c471c784ae7da7eaa69eb7f7f80572fdd11bb1d070080fbc22bf73468788df51710eb0b428ee751c47d8e894f745a868404a0bf35f0121008b722b1eaa6aedfa1bf2e7ccb2d61d5d76331ff5e20fa26b8471d42645288d7226bbd9c9e9e1cc9eb3d541e407cc2dae5e690cd628ab84875f2c50ba830d3f474b079b407000000deff000040430a537a395dc73bda367bf12cb7d81691a5fe8c47be395656a297e9df0e71b967ce7daac4be290159f6bcd75f0dda9de5532e66ae9e48b0ed1254a81faae79b6af6fbb869604d51de44c4e0973171ad47d6c00ebc7603093f000000fdec743af930cd6db49a47613808bad959719c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f15d6533f78a1f4e2df4ca23d867693fd42de9b49a1b36d48a44ba6a4530e59bec53e876dc660dd63bed8d31c31c37a373d4efd89f0000377b1b1292a893a516dab183ee65744fb8fc4f9ce2242e0f0059161c5e0000000000000000000057d77480e0345effff6413258d1f6eb190aa28cbb4bafe34124172e436b176c7ed4b132fb805d5edd9d188daf28d89c014c3ecca10ae55704544673e1fa03b84f63e022fe755f4007a4a899eaf52c4f491d8e97c862e29e457060000007ac691faee1e0c8fe056a07474e6e5490a7d3c3402000000b60600d837c6befc63ddf2f594ad7cbc56a1e44d218c956a5392a995f1fae8e9f206efbb33854dc70104d74dc07748f9745cb796da2dfb714a0500000000000000faed94fc39acfb3fd25dfa8116a154cd1226e1bb72b59fed817072a0da60160761fd3dffda0f7c592eabd8ab68334d2a1693cb187539049e331272bf5135044df8161400211b8012b6eb1ed5656e83f65509bb4b323c5bd61bff949d3bade2f6ffda1360c2786e16937ab61d6dcafed319c716357d0885f9c6d1f442954c167dd9b4acd9468ce3674c82bbb2e31389179b025dbe063b7f906217b2cf8410c7023aa3e5cc3ba1000000000000000000000000000000006ae6301a2da44394275c582a6516bb92ea1980a0a659f2f1811c8b281c209647c4241f292b20508b215dde27bb2487a6e2b5e4a8ccfab90c23827ef06cbe364073005f8a6d1456aaeb85ffb7858f24eced67a67ab825e863928ed64c83f62ffdaa997657335b63c6b4163aff094059e626766845fd779c9e6cdbbd64c24936615ee68538e8fddd0d90f3a7579579a142c0f7b318264d5c13c31cf475829528267ead38523cab7e1664e8426ca85e82ccf821c8a02a7e7d954d05b68a9c28f79429b09e2bb3681ae2b831e27c735123361c193d66ed4d71f19b199d371ec6bfada7cd370e3fdd3cd980fa1e145fd3f3e96b1feb53c865e1ada08f5d16ed652ee0c7f45352222692fbd679212c225d097aa90f7e1fb1f983415f43e75a19ecf7fd21bfa150ef563aa72ba1c43c5f3d9be128ec26b691f31f9cab931631606a81622f120675c962be2d3b5e95f74f0b209e42e6bdd76e6e725295b1d78d928f6f63c41cbde2ba66ad81168070c8c6e18a6e452a31bdc4a60d637545ed4c8a1c649c3ce54ad3e16304d06a234f5f9311ef0f78924b68dbb4712efdb6974667bdb54f16fd2061b9ba93638dd177227e94e4ebd0ec1d437db948062bf41742000000000000000000305f70dd02fa0c61d5fe6d8ff35389246037e18d34c1375ae04f44f0c2543c772c5ccb137be7dc1874c514b37c668554d77d4ea5ed144a648257f4a0301067bbcd9b91072659d872f26b796e2b81025edb5f45f785e2c2602b248ecdd80f019ca659be7e8ae953325a27564f33c9d458a60be3dab38baab7eb1a66ab1ffd6308f7fd51beb356fe75eb985b7581bb5584c53984ba9c7340f97e8d3825681c53de5f554e595b00000000000000006a8fa9f05d64c4be42f981f00051a39938613067dbd1427e01bfec016e51844cefa8a855bf23ac887b4a88eed6d9443857242f28e31a41d20105fbf3394ff910e734b4d9101265ff729c426e01c1ab13dda8c388b9e6626f19eecb87e39175e85e17000000000000000000009431807e43886903526074e6b40244c938a4c68a38c25ddd7c143b3f14eafe4b28ec66815cf8d1f56aa1424bc9b5d58790298e5b310969e50c222563b54e60854e1bfeef448aca8c5ccbf5546ce4c3cd5a733fec25fb94e1e0f966bcbd28a4d8fe4f556eaa1104a793006619700798354c6ae05025040965e30
83562bfa20968c04007d21dc02c9fd1f75e1ff40f439bdde4e784012e52049b483f02f81b88f5f57816b3fecec79cfca8d37203e769759d6b6a56b7605ced8ee18475a77ff0963a565fb6021d216c01b1098e40550a1cfd80e9180100000000000000654cd76ca61fe5ad8a31ec558fdbfa706d5e738bceae81fe777c307d5bc72183a4c2d35732e74dd690c57bdfdc1f069f9491bca7a8c59363799be70018c25ece5ad7307dc7a95c51bc25a8bbe2cf5ddf6aa161693782b0e7feb8a768f391b49d4c978c96dbb52f21c122eba9f17c8bed10591958cf06321a248b5f76ceedfe0d080d6aeadc11b237b3326dd04b86ac37c0d131544888db9e128d059761ad9a393e96c3b41c13c5a381bff187a75de560ba6eb3faa5ff8d2bb3c88f8de5efc2fb2200cfda6d07ceae22577064334fbf76a23e62e6059211d995b879f6b7d3f7fcf03652b81e6b7cdeff947ad185d3c6269ca247b429c3b872a8f1ef60407d29a874f4ec31c9effed55543a65a6b4d778cebcd43b7905f3960140bd783540a7353014bda8e9c7a34a5f428fd1f8eb11e837dd9d586487fdebcb1ecd3a003ff0fda4be617fecf1ff0ef2cdfb7fea73ca18874664d60a4b9423f3297bc8eb91b4ee1d73272abbef3e7a828a7d7ab055a8eb58fe379de85338304e26e3620941b463e9049fd105c74c91cc4d71b0f76e2c2e4825106aa7ce2a3adbbc7a0443ece58e752b47e6f677ec97c5c568a89d6e36b165c39132a0f27080ece2a94c320b002c77f82662675a7713c7067081cac15994698c41ff4754268ae2676384ff799783f55d7e5a1a092a01b965dc99cb7a9d98440c355927629f2bcf9dc2396eb2f5d25829715b24327642ac48f1201014a95e0e65e12cdf27e19043e3c5d3e798375cead35b9a93190a52cdecaaccc854a1d41ef365303f0e9b4fc969c9dab6df5e8a795b140fcc09e8a7b694d12932917facd8ceaa4e2d0d16bb0b95387fcd5ff136d8a673e82e8019dffa3e89aba6755f3bfbddf94daf442bbff744591931872a36cf921ad69f2127386e8b0f9afee4da8d3fbec809fbb3ca0fded2859cf25d4c6155d396c5b9bd1a928923123f63f4c40688eae69990a94194562473867ebdb502e6c2d6938b9274b2d3648857f9a83ab5995d23bd840274764a00d97c64db9805f62e3ce4327cfed7a167ab78a666f960669b69ac9f70840cb95b6450d0437adf84c45aac026843d7db2286c482551978e4ea4eb3747e35e7bf40567003506b9a665291d75106fade70809d6329aa30adbe59fab0046b6a053cd6b4393883f7170901cf1c1c1c714a864329e73ce95c42865f3f1d6bbdfd6f57a04e06815993834764e2062813f38b109dccadfe967473f5dcc5c4a3549472873533cf75f202c3ee586bdf663de385708654be8950b2c6b159cfdebea7134f59bd799714221de0331f8da537ebf5aeadaf21efafbb184a056a16515f470de703db895381101880877339860b69b540a92613d84798793d370fc45891cee8a2e8bcf2f6d9b4b3918e770279f8a01eca72918039b86e69a9e3d873db27b3fddc7"], &(0x7f0000000100)='GPL\x00'}, 0x41) r12 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000000)={&(0x7f0000000080)='sched_switch\x00', r11}, 0x10) bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f00000003c0)={r10, r12, 0x0, 0xd, &(0x7f0000000380)='*,.\xb1/-[,{-\\\xdd\x00'}, 0x30) sendmsg$nl_netfilter(r8, &(0x7f00000004c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000480)={&(0x7f00000003c0)={0xa8, 0x6, 0x7, 0x301, 0x70bd2a, 0x25dfdbfb, {0x2, 0x0, 0x6}, [@nested={0xc, 0x66, 0x0, 0x1, [@typed={0x8, 0x7, 0x0, 0x0, @pid}]}, @nested={0x55, 0x0, 0x0, 0x1, [@typed={0x8, 0x82, 0x0, 0x0, @uid=r9}, @typed={0x8, 0x91, 0x0, 0x0, @pid}, @generic="bee862edbe1b1f620e452a8dcd4d7c23e8c74fe3f0a1ff8cbc8c0e3f4764553d174ba913a554274d27012e22fee00cfd1a", @typed={0x8, 0x35, 0x0, 0x0, @pid=r10}, @typed={0x8, 0x61, 0x0, 0x0, @uid=0xffffffffffffffff}]}, @generic="5927c5107210bc161c85a0b737b16f7c1b1fd59524f821cf2a6f333dce772fb76fa12fdb", @nested={0xc, 0x20, 0x0, 0x1, [@typed={0x8, 0x87, 0x0, 0x0, @ipv4=@remote}]}]}, 0xa8}, 0x1, 0x0, 0x0, 0x40100b0}, 0x240000e4) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r14 = socket$inet6(0xa, 0x1, 0x84) bind$inet6(r14, &(0x7f0000001180)={0xa, 0x4e20}, 0x1c) connect$inet6(r14, &(0x7f00000010c0)={0xa, 0x4e20, 0x0, @loopback}, 
0x1c) getsockopt$bt_hci(r14, 0x84, 0x11, &(0x7f0000002280)=""/4090, &(0x7f0000000040)=0xffa) r15 = socket$nl_netfilter(0x10, 0x3, 0xc) sendfile(r15, r13, 0x0, 0x100000002) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, r14, &(0x7f0000000000)={0x8}) mmap(&(0x7f00006af000/0x2000)=nil, 0x2000, 0x5000004, 0x12, r6, 0x1d4aa000) 19:46:42 executing program 3: syz_emit_ethernet(0x66, &(0x7f00000000c0)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b00000000000008e3ff0086dd080088be00000000100000000100000000000000080022eb000000002000000002000000000000000000000008006558000000005db7bf77179b330e7098fb3c0902dd3da00ec62e3b8c1684678279500c15391a2ee33f53cce9976aa9ad194bd17c849fd56a3d526bf310d013acf3b58573296a8f55cb3895ec30d1182d75a5a40a6696ea2e2f37135599ee6d394a87bbb3be1a797c2184950071ef45d13fc03f8077e328fc14163c8ea1442c231cfafd0d9fcaedadffec6404e60b7728726f1831b7f4a64d5ceb703989a27748e0cf2fdffa16541e08955682a924c139fc271b0e5238030cf91b202fa15717d63b405c0a5d3e"], 0x0) syz_emit_ethernet(0x1e, &(0x7f0000000040)=ANY=[@ANYBLOB="aaaaaaaaaa38ff010000000000000091bda5000000000000000000"], &(0x7f0000000080)={0x0, 0x1, [0xdbe, 0x66e, 0xde4, 0x4c2]}) write$tun(0xffffffffffffffff, &(0x7f0000000200)={@val={0x0, 0x1}, @void, @mpls={[{0x2900, 0x0, 0x1}, {0xffe01}, {0x86fe, 0x0, 0x1}, {0x8, 0x0, 0x1}, {0x1ff}, {0xc000}], @llc={@snap={0xab, 0xff, "02a6", "381129", 0xa00, "9425b4ab948f03299e84c12b61d3c7"}}}}, 0x34) syz_emit_ethernet(0x1016, &(0x7f0000000880)=ANY=[@ANYBLOB="000000000000aaaaaaaaaa3491002d0081000e00886cff3704148a7e3f2d4b2a952b7e4df4f656c5b3f8f405a1359da0d9694fd27b1e9f43295edec04c4558fee08c030bea58a4b2677c7156285b8acbf8eeac5b1941386fc05e92a0222c03dfa4020338a758994f9a6e3eb6908ac43dcbc77cc351de9759ed7873ddf6dc99cb43c01f43c80bc865470ab36b35e7f2f20d962f5d67ac9e58910fa505e88b0c8f85eb3ca24b0d5c2b59f29c908a0b9d746a36faf151277b1af0fd759da8e061d78a1a053200343ac109381fc3405f8ded0c7941d0e3495c4c0acb1abfdbd2c1c9758ab5c54c31b6bb84749db5072766be1a13eda10cf6b38bc048b265a5996b2e34fdf926b741f351b5ecea7cf1ccb00dc21594fdcf523622fc0f520d31563460fa1a0465637230cb94a257192ad527436bf44eab1e4bd50187b736fb55dde1791d84dccaa3b3f3a412bb7ae1afa92c9a4cb556f22c12935cc733a66674a42b564562adcc1eada6e3de2d18027421688f80ba2abb4c12350ec963d508cfee86e20894f766fe0522765df5bc0fd3d9d83e0e54e627bbf4a2b2a2b2fdd8b0ba8bc2c4ec16afb3cf162756633be6c564d4dd5feaff6e0d202b9582168b7a4d54224867bc0b9b9af82d89e9f116e02a1814f5423a0c422ab963ad53718b82af92acefee99cc5633433610620aadb7653756f54633be2e9122ee89276f7d337d605cd9bd54be0b7e5c0b37cd3965696b35be803bd9957393b639541bd6f8ab6f8fd1412a06fb0ed77b9fc181da7cf011a50827288bfc09c0bcc649712f683129c948a526b9810c55fa2351ff5e294868196d96d636b1b1ded917a6870158c6f1e627da08d7d716799a3db8a4fd3dff15177f415808c0db061de5abf868ab572fbf4cfebaff071eb1a2d7e906bd69763d6c8e5f4c576773849d494bd43cb508542bd8b8cb1160d0805fa60a2f140825333b8584b170472ff0eb3ed785e9035e12c94378e06323a6c7a66fc503c1cc1c041192ed0fe3805d7932511b015dcf0dbdc4e358b5617dd44df21fabb706ddc16aa185ba861df73403d29cc720070f964272c6dce082d60a3ced4e487bc400083f1ad5c610b7460f33f976fb3543d70d49cc0d489b410b96a7ce77428d3a9a390bfc7dddbb6bd7df5834c8a98d9c9a58c3d36f7db2de51483b76774bbe0fe57059908763b9135a8e030886e49ab1c3cee2d448a193d05d68f3d22bcc92727d4993a1f4e9f09dd4a668afbd3ac7f70a6e5549777a5c5d6d234d36e97a4d5081d62c5702b7f209aa256a86a35fa03cb8be531fe6ace428220b2677415b074e1e65b181c6246d22048ce314bc6d3ed57b7dca5f91cbe75e3c65dd97cf1d049deb78807a58c4320a1e6fb8fcb43829d9c99b0a8af49b82064a7e639ca8af10bd6ca2e69503f2ac9b8939c8f0a5c15db4de8d7bf5644
9a69ccdaa031af9f22a5bedd4fb164f26c7dff1e32a6783d15e073e4d5c61623c821c52e108618260ed4c2402255ec24f82a884364f0b7bcab318bd4accb7ec6b7787c20b228e442ab33b53008f1ac8b5b1710de26b2aff9fbb3c4362d0b56944b7771fd9ea0e40541eff1a2e04a055837b2943f01e2c2746a28a1decd964e31ef0ecabab2107713f1444e71c145e71a00658662de014805baab682ab9f75fd29707316ff208ed8a2bdc98521d7c1b6bde395da01e43d1ec95d5f150f4bd95ccffb45387d2620cc606b7bd5909449a0ead3a7394383c243c885b0f51b37cc7813c33a29cf1290e6baf6924f070899ea41f3ea7ace1bade77decc6c6c1cdbe4ece040c6c09996ffad5d666da12c8017a15baedcbbe9642f17fdb2ebaff9d3f8f3089ea85b889992d86922b23435fef4f71200279cc135a7c84bebc59e1c609b677b1007cd2296eb632d090da27ec527272896ebcc5ddbe556431c3d1967256f6a937877b49d0ba727b64bb44827da57b7afcfe8752d361097598f50d63fb12e9f1c587c0eabe966eb4803dc428e957429e89c8cb04dfeab0fedbaf9caaf083db237f57a1a2a3575f3f6a8f26db3b27d8b63ccb021908a7727354809867f51f62498d40fecdd30f09b03bdaa9a9b03fc34c6cd0eba12de45a935776ea2ada0cc4fd4740cb877a6d3d9018078621723a846a1c6e5f9356b6ef86f6af50a327ee45cf6a15f5a0465d08eecb0a2b784294e3bbf008984f1d55dd38ab51e3a4715acd6173f753a855655ff61dc7a63a79f603c23aa75a3eb53dd2575f5cc72dce538ad2786a80dd68bc098d422b5e4077c9fc62133a3a45c2cf3014e7ae1e551d509c180f5619bfe0d7b180c01e98ccadc27cbe88296ba5b1538cd182a309a0e4dfe2fe964fe08fb355c2f7c1bcf732c1bd616c994abd0ff6396022cd3d8790e0d6c3b9264bd4ee922414643a2eaf2cc4b9388d948c3a0dcca3f71b7baef1f791a56c902d5ba5ed2849fb90726eae9c084aa3de73d0301cfafa56ac3b59c7830e945eb438187b0da89e236fd83f93fe884badca55ae6dab9bfac8e7479bb226967063f5de326e80212e59ac507cd02864ddab28654416dadd6426654ee11977519f63df012985bf70e9b548b480e277ee1144a008ad3ad66b982ee4a612e2de07f209c7fc63776cce9bf8c9e9af64cfc6b9957dbcf2399c8a5039f65409ec2cfec598006f7f43d3b4bda4067ac2c1e75797655606da14af01d1578caf7b1e360c3cf206a90ee2f43ededa1ffca5e6689034510300101cf3e930da80b31b9ebe3c7734136d161ce77eac6c31b3a76573e253484c28e211b2ace88945270bc52282bb62701510d60ac73e1e5cd6bf306f9d0c20698eb4ae784f31be948580d2aaaef23c696533baefb268afd6e9ff82b1da5032e77c031c5bf7599dff1f75e98382646f2c22f7501103fb3933766e77ee4e6f26a0c0e14c762793a1ad2de9e3c548c72738686312eaeae7156240f78acab715ebcfafeccab3a4cb7d0dbe9eaabc84b30f8ce0d334b444a82946ec2d3455188b5d26355cf7cd23bd22ef783a538bbe0c88ff93b29380fa933e05007f812fc0e281880808cf612891f64f585e66324124f9d94d59836a1560c8745456b5982c0fff2dae034d973211353a7bc3d3b5bc1a06ca6e81ee85b0f3bd1cab125b158838da474da75bdfb6afc154e0d1fd82c37e6b58a341a2f1895d684964bfd872e5ff79eb9980b5875e5310a0ffe060154e800bb7b96e4461cd112980517db29367105ed5b42243b029138a7d14719063879d0a593351f1ae7f0bbdd72ecfa1ae3cbaf204b95d041db5fb16b5e168660894baff54fc65ce74bf2324141cdc85ec10eca59b0999800ce5409451bb087a985e15c924e72afa7351c73b564c519d3308317e653b4e7fd0af0c9071e29e54cbc57c803b58a3dc19fc7c6a736b4863b8686a34ee43aa8c71e420f88ce71af7d3ce24c182b28ec8404bdedaaacf3ad5554486c501868830ef0343456f873634bc8a36d7fb9025701b332bda1fe9e0939a6e5198c3bda81e78d5e9e91125983f5adb20384ac7907a0f9c5ab63b16a1b9551a29d57233b7f1a3c3e1ea17c482f5e4afd80622f97b75f46638720c46e648f049a0840cc7d5622d92a892dc9bef3cef0b9b23cbebd00ac25d96443e257ae472b6d0e9323b44039e960851e5e3f3f2236a9e2a07148f68cc5cd357ed7512d70415b4248e09abde1da67eb7aa94ee8634de4c550acec389337494177d503db954e991fc45280386f2ccefc75acb36d51f98785da15c8c6082cbe94006e0598da23aeb1d411ca41a04ad9e2a975c4f0eab7f90a39c5155947659fa82398471e4aee8baf3706f7c416c0bc8eb99497af659085313a5d5ba3d74972d8a319dbb8b4d48e75c0c5888f393d9561a52593d6b51a6f7502c755028180b14efe991a7c8003c5ca9415297f2a3a192a90e997dd3fa7a130223cbb62206048b563e6e7a1a22f4ae05b426
4613a8059c72eed01a9b050b506af38eb55b13204fc53eeff605c428e6e3717246afc0711eea250f23fda8f122288c74dd358dfcff52b5a5bc65d723e38ac488c917c61f6c7456cdd5282685206bddfd4d0cab2ecfff08284782ed453d22092d1274b246ff122beee0a140f2f3ea2f971ab206fd8ac6f5296ce511d98a7c17fe61a9157de29e216f4762215d394019a7eb19da27d1b24673e23c1e97252bac36d2617ab7ed4b3e41fe0a42c3b1deda9b69f09334461f38eae6adff8b87b84ef4548bec81279f6cd0f18751cac823b5af34a2ef1043c9859bbdf5d4bbdbe594d601c20c3904a0d603887b9624fddf5bd01bd73d817c3d79486fd217813f1588abfca5254949fbdd75a337a558cbaa96da976247d3e0bb5cce63b40a56a413289bac2e31fc0a61ebd62e50e1e56130ab01ed564c1a275a00c6c87917d1c6945b446581f96ee71717389ba014af29068440febe7dff9278ba3d16c671c7092d33b35f7a9f54c1ac9af864378d8969080dda37d4d2498b3e38a6d3535711db2625749ab8490f4f6faae81b904fb0049290a6662d05e8d5d01334474b77d582c749c48508bf58df7b4734f9161b6bed30fa70bf05d60fce6b9c7cfa53f4d3c0f706de225bbc83fba0ea6ec0064577e82815da54d757d6c12e7fc65d94c13de70a405aca384f8d3adb26d4a96875655e7837ea6b781b07bc3d2448e8366450176e4ed81287e2066d1587f33fc9db6b94e5eb43e8703d53d37b989901f07f8db5d8021ecd2a3c9ded32159a3ab45b02a7d08878c3c53945ebf4f2929e2e8d3e8fa28342f490f8dfc435f37d99c3f32bd4e95f12676b8585c426859152bd95c3b4dd2dca53a6c31a728148d5d81bd1c132fdbbe90e729bee19ff9b91fe295f746cda7b548af845f133b04d5ba37b447939689a03a927b548610515664ef3b4333ac81a5abd2d4d725c2c9ee95c6ed098415fc496d131378e57e8a48eb6e75c588df75e0ce4724cd621fa5a7d1d3b45e8bdef8e20902859474af0ca5754d1e0d0f3ff26768469553974e29ec6297ef1fa7d01b22b73a4602140804f0811c8009a900bae094ce52f0852fa7f23522bfa162ff209b8c9a6bc2825a5006ea13e546223e3d940ced5f165f2404b892052bb48b16b8836c1bd3b7f6773cdd38f0f7036e83ca68a3ad0fe3ee6c2f87b0e35538b1deb739e193111741d3e7e6350b14533415c955bb88db5cf80a29b28c8f816616d4bb8c42d9a7aa3d6ce4fab89e37275d55a986717406fa54f04d6cae2df0fc0a417dc68abd6136d4b347716d5bb4d0b0d95ad0084a3969c5f793db00762a2dd3abe0818183b2cdfea1769b4b0ce35dac5741324117085f83af2e108c64470c5609bd196e0b7bbd71459a02dd0744d210ee36329d9d937a8b7f4d9e5e8a382e46284a68da389512a0a8817e612a15474c2c7c6373432fc3f9d87b5491f2c33edcb8a7b6d0f23f4fd73ee777a7647feba0d35d12b138629b7c91014c074e7b63d8b39be2e1726acb3937207a91ee290a252f1c5526f104b765b8d48773b0474ceae1f26120355d7ba7b0b0ae0b172866af96bc38525fc377f8aa432be2c2c1cda192ca07eccf5e0884a183c9d194885cc396f6a85ef693867d6ec688a60e5e51a465ed35c2c51855b99429775f96f1d9e63275ad0ff07b6d7a33fb110dac8e621f56da8146b0a285ac3e50a906e9c2557bb67895cbfdc559e6133ed42d0828f68d7815d2297ad5f9ff8d683df3225b8dda0dcf05c8e26a8ee8862b543a915ee868aa9e70d508e2299aecc5a27d89cd6bdad7ddf64baf1fbae56a8727355e23995d3875d98696cc1542841166c307b84c1e529965fe05bf916f8"], &(0x7f0000000000)={0x0, 0x4, [0x5f8, 0x429, 0xaa2, 0x612]}) (async) syz_emit_ethernet(0x1016, 
&(0x7f0000000880)=ANY=[@ANYBLOB="000000000000aaaaaaaaaa3491002d0081000e00886cff3704148a7e3f2d4b2a952b7e4df4f656c5b3f8f405a1359da0d9694fd27b1e9f43295edec04c4558fee08c030bea58a4b2677c7156285b8acbf8eeac5b1941386fc05e92a0222c03dfa4020338a758994f9a6e3eb6908ac43dcbc77cc351de9759ed7873ddf6dc99cb43c01f43c80bc865470ab36b35e7f2f20d962f5d67ac9e58910fa505e88b0c8f85eb3ca24b0d5c2b59f29c908a0b9d746a36faf151277b1af0fd759da8e061d78a1a053200343ac109381fc3405f8ded0c7941d0e3495c4c0acb1abfdbd2c1c9758ab5c54c31b6bb84749db5072766be1a13eda10cf6b38bc048b265a5996b2e34fdf926b741f351b5ecea7cf1ccb00dc21594fdcf523622fc0f520d31563460fa1a0465637230cb94a257192ad527436bf44eab1e4bd50187b736fb55dde1791d84dccaa3b3f3a412bb7ae1afa92c9a4cb556f22c12935cc733a66674a42b564562adcc1eada6e3de2d18027421688f80ba2abb4c12350ec963d508cfee86e20894f766fe0522765df5bc0fd3d9d83e0e54e627bbf4a2b2a2b2fdd8b0ba8bc2c4ec16afb3cf162756633be6c564d4dd5feaff6e0d202b9582168b7a4d54224867bc0b9b9af82d89e9f116e02a1814f5423a0c422ab963ad53718b82af92acefee99cc5633433610620aadb7653756f54633be2e9122ee89276f7d337d605cd9bd54be0b7e5c0b37cd3965696b35be803bd9957393b639541bd6f8ab6f8fd1412a06fb0ed77b9fc181da7cf011a50827288bfc09c0bcc649712f683129c948a526b9810c55fa2351ff5e294868196d96d636b1b1ded917a6870158c6f1e627da08d7d716799a3db8a4fd3dff15177f415808c0db061de5abf868ab572fbf4cfebaff071eb1a2d7e906bd69763d6c8e5f4c576773849d494bd43cb508542bd8b8cb1160d0805fa60a2f140825333b8584b170472ff0eb3ed785e9035e12c94378e06323a6c7a66fc503c1cc1c041192ed0fe3805d7932511b015dcf0dbdc4e358b5617dd44df21fabb706ddc16aa185ba861df73403d29cc720070f964272c6dce082d60a3ced4e487bc400083f1ad5c610b7460f33f976fb3543d70d49cc0d489b410b96a7ce77428d3a9a390bfc7dddbb6bd7df5834c8a98d9c9a58c3d36f7db2de51483b76774bbe0fe57059908763b9135a8e030886e49ab1c3cee2d448a193d05d68f3d22bcc92727d4993a1f4e9f09dd4a668afbd3ac7f70a6e5549777a5c5d6d234d36e97a4d5081d62c5702b7f209aa256a86a35fa03cb8be531fe6ace428220b2677415b074e1e65b181c6246d22048ce314bc6d3ed57b7dca5f91cbe75e3c65dd97cf1d049deb78807a58c4320a1e6fb8fcb43829d9c99b0a8af49b82064a7e639ca8af10bd6ca2e69503f2ac9b8939c8f0a5c15db4de8d7bf56449a69ccdaa031af9f22a5bedd4fb164f26c7dff1e32a6783d15e073e4d5c61623c821c52e108618260ed4c2402255ec24f82a884364f0b7bcab318bd4accb7ec6b7787c20b228e442ab33b53008f1ac8b5b1710de26b2aff9fbb3c4362d0b56944b7771fd9ea0e40541eff1a2e04a055837b2943f01e2c2746a28a1decd964e31ef0ecabab2107713f1444e71c145e71a00658662de014805baab682ab9f75fd29707316ff208ed8a2bdc98521d7c1b6bde395da01e43d1ec95d5f150f4bd95ccffb45387d2620cc606b7bd5909449a0ead3a7394383c243c885b0f51b37cc7813c33a29cf1290e6baf6924f070899ea41f3ea7ace1bade77decc6c6c1cdbe4ece040c6c09996ffad5d666da12c8017a15baedcbbe9642f17fdb2ebaff9d3f8f3089ea85b889992d86922b23435fef4f71200279cc135a7c84bebc59e1c609b677b1007cd2296eb632d090da27ec527272896ebcc5ddbe556431c3d1967256f6a937877b49d0ba727b64bb44827da57b7afcfe8752d361097598f50d63fb12e9f1c587c0eabe966eb4803dc428e957429e89c8cb04dfeab0fedbaf9caaf083db237f57a1a2a3575f3f6a8f26db3b27d8b63ccb021908a7727354809867f51f62498d40fecdd30f09b03bdaa9a9b03fc34c6cd0eba12de45a935776ea2ada0cc4fd4740cb877a6d3d9018078621723a846a1c6e5f9356b6ef86f6af50a327ee45cf6a15f5a0465d08eecb0a2b784294e3bbf008984f1d55dd38ab51e3a4715acd6173f753a855655ff61dc7a63a79f603c23aa75a3eb53dd2575f5cc72dce538ad2786a80dd68bc098d422b5e4077c9fc62133a3a45c2cf3014e7ae1e551d509c180f5619bfe0d7b180c01e98ccadc27cbe88296ba5b1538cd182a309a0e4dfe2fe964fe08fb355c2f7c1bcf732c1bd616c994abd0ff6396022cd3d8790e0d6c3b9264bd4ee922414643a2eaf2cc4b9388d948c3a0dcca3f71b7baef1f791a56c902d5ba5ed2849fb90726eae9c084aa3de73d0301cfafa56ac3b59c7830e945eb438
187b0da89e236fd83f93fe884badca55ae6dab9bfac8e7479bb226967063f5de326e80212e59ac507cd02864ddab28654416dadd6426654ee11977519f63df012985bf70e9b548b480e277ee1144a008ad3ad66b982ee4a612e2de07f209c7fc63776cce9bf8c9e9af64cfc6b9957dbcf2399c8a5039f65409ec2cfec598006f7f43d3b4bda4067ac2c1e75797655606da14af01d1578caf7b1e360c3cf206a90ee2f43ededa1ffca5e6689034510300101cf3e930da80b31b9ebe3c7734136d161ce77eac6c31b3a76573e253484c28e211b2ace88945270bc52282bb62701510d60ac73e1e5cd6bf306f9d0c20698eb4ae784f31be948580d2aaaef23c696533baefb268afd6e9ff82b1da5032e77c031c5bf7599dff1f75e98382646f2c22f7501103fb3933766e77ee4e6f26a0c0e14c762793a1ad2de9e3c548c72738686312eaeae7156240f78acab715ebcfafeccab3a4cb7d0dbe9eaabc84b30f8ce0d334b444a82946ec2d3455188b5d26355cf7cd23bd22ef783a538bbe0c88ff93b29380fa933e05007f812fc0e281880808cf612891f64f585e66324124f9d94d59836a1560c8745456b5982c0fff2dae034d973211353a7bc3d3b5bc1a06ca6e81ee85b0f3bd1cab125b158838da474da75bdfb6afc154e0d1fd82c37e6b58a341a2f1895d684964bfd872e5ff79eb9980b5875e5310a0ffe060154e800bb7b96e4461cd112980517db29367105ed5b42243b029138a7d14719063879d0a593351f1ae7f0bbdd72ecfa1ae3cbaf204b95d041db5fb16b5e168660894baff54fc65ce74bf2324141cdc85ec10eca59b0999800ce5409451bb087a985e15c924e72afa7351c73b564c519d3308317e653b4e7fd0af0c9071e29e54cbc57c803b58a3dc19fc7c6a736b4863b8686a34ee43aa8c71e420f88ce71af7d3ce24c182b28ec8404bdedaaacf3ad5554486c501868830ef0343456f873634bc8a36d7fb9025701b332bda1fe9e0939a6e5198c3bda81e78d5e9e91125983f5adb20384ac7907a0f9c5ab63b16a1b9551a29d57233b7f1a3c3e1ea17c482f5e4afd80622f97b75f46638720c46e648f049a0840cc7d5622d92a892dc9bef3cef0b9b23cbebd00ac25d96443e257ae472b6d0e9323b44039e960851e5e3f3f2236a9e2a07148f68cc5cd357ed7512d70415b4248e09abde1da67eb7aa94ee8634de4c550acec389337494177d503db954e991fc45280386f2ccefc75acb36d51f98785da15c8c6082cbe94006e0598da23aeb1d411ca41a04ad9e2a975c4f0eab7f90a39c5155947659fa82398471e4aee8baf3706f7c416c0bc8eb99497af659085313a5d5ba3d74972d8a319dbb8b4d48e75c0c5888f393d9561a52593d6b51a6f7502c755028180b14efe991a7c8003c5ca9415297f2a3a192a90e997dd3fa7a130223cbb62206048b563e6e7a1a22f4ae05b4264613a8059c72eed01a9b050b506af38eb55b13204fc53eeff605c428e6e3717246afc0711eea250f23fda8f122288c74dd358dfcff52b5a5bc65d723e38ac488c917c61f6c7456cdd5282685206bddfd4d0cab2ecfff08284782ed453d22092d1274b246ff122beee0a140f2f3ea2f971ab206fd8ac6f5296ce511d98a7c17fe61a9157de29e216f4762215d394019a7eb19da27d1b24673e23c1e97252bac36d2617ab7ed4b3e41fe0a42c3b1deda9b69f09334461f38eae6adff8b87b84ef4548bec81279f6cd0f18751cac823b5af34a2ef1043c9859bbdf5d4bbdbe594d601c20c3904a0d603887b9624fddf5bd01bd73d817c3d79486fd217813f1588abfca5254949fbdd75a337a558cbaa96da976247d3e0bb5cce63b40a56a413289bac2e31fc0a61ebd62e50e1e56130ab01ed564c1a275a00c6c87917d1c6945b446581f96ee71717389ba014af29068440febe7dff9278ba3d16c671c7092d33b35f7a9f54c1ac9af864378d8969080dda37d4d2498b3e38a6d3535711db2625749ab8490f4f6faae81b904fb0049290a6662d05e8d5d01334474b77d582c749c48508bf58df7b4734f9161b6bed30fa70bf05d60fce6b9c7cfa53f4d3c0f706de225bbc83fba0ea6ec0064577e82815da54d757d6c12e7fc65d94c13de70a405aca384f8d3adb26d4a96875655e7837ea6b781b07bc3d2448e8366450176e4ed81287e2066d1587f33fc9db6b94e5eb43e8703d53d37b989901f07f8db5d8021ecd2a3c9ded32159a3ab45b02a7d08878c3c53945ebf4f2929e2e8d3e8fa28342f490f8dfc435f37d99c3f32bd4e95f12676b8585c426859152bd95c3b4dd2dca53a6c31a728148d5d81bd1c132fdbbe90e729bee19ff9b91fe295f746cda7b548af845f133b04d5ba37b447939689a03a927b548610515664ef3b4333ac81a5abd2d4d725c2c9ee95c6ed098415fc496d131378e57e8a48eb6e75c588df75e0ce4724cd621fa5a7d1d3b45e8bdef8e20902859474af0ca5754d1e0d0f3ff2676846
9553974e29ec6297ef1fa7d01b22b73a4602140804f0811c8009a900bae094ce52f0852fa7f23522bfa162ff209b8c9a6bc2825a5006ea13e546223e3d940ced5f165f2404b892052bb48b16b8836c1bd3b7f6773cdd38f0f7036e83ca68a3ad0fe3ee6c2f87b0e35538b1deb739e193111741d3e7e6350b14533415c955bb88db5cf80a29b28c8f816616d4bb8c42d9a7aa3d6ce4fab89e37275d55a986717406fa54f04d6cae2df0fc0a417dc68abd6136d4b347716d5bb4d0b0d95ad0084a3969c5f793db00762a2dd3abe0818183b2cdfea1769b4b0ce35dac5741324117085f83af2e108c64470c5609bd196e0b7bbd71459a02dd0744d210ee36329d9d937a8b7f4d9e5e8a382e46284a68da389512a0a8817e612a15474c2c7c6373432fc3f9d87b5491f2c33edcb8a7b6d0f23f4fd73ee777a7647feba0d35d12b138629b7c91014c074e7b63d8b39be2e1726acb3937207a91ee290a252f1c5526f104b765b8d48773b0474ceae1f26120355d7ba7b0b0ae0b172866af96bc38525fc377f8aa432be2c2c1cda192ca07eccf5e0884a183c9d194885cc396f6a85ef693867d6ec688a60e5e51a465ed35c2c51855b99429775f96f1d9e63275ad0ff07b6d7a33fb110dac8e621f56da8146b0a285ac3e50a906e9c2557bb67895cbfdc559e6133ed42d0828f68d7815d2297ad5f9ff8d683df3225b8dda0dcf05c8e26a8ee8862b543a915ee868aa9e70d508e2299aecc5a27d89cd6bdad7ddf64baf1fbae56a8727355e23995d3875d98696cc1542841166c307b84c1e529965fe05bf916f8"], &(0x7f0000000000)={0x0, 0x4, [0x5f8, 0x429, 0xaa2, 0x612]}) 19:46:42 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2e020000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:42 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x24000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:42 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000280)='cgroup.controllers\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cpuset.effective_cpus\x00', 0x275a, 0x0) ioctl$FS_IOC_SETFLAGS(r0, 0x40086602, &(0x7f0000000100)) (async) ioctl$FS_IOC_SETFLAGS(r0, 0x40086602, &(0x7f0000000100)) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000000080)={0x288000c, r0}) (async) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000000080)={0x288000c, r0}) socket$nl_generic(0x10, 0x3, 0x10) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = 
syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) write$binfmt_misc(r2, &(0x7f0000000500)={'syz1', "4a7547aff0800d1fa75912a4dc682001e220889a5d16a1043e6b38b6b59622b53384762ba177dc8ebdd13f37c53db1e31c801e6a256c9ca3e9ffe2a3f4d1e9f48bdf5e95640125b1bc2e35860dc8c77195db9e5b8f58c741f51b97b614f9d8558e20af5c5c49c6a5407ae93fd808435118b4778b03294d51ed4af0b12388972e8def2a78241f9268e913a1caa3938a997f37a3b4ed07a6ebf9e34d3e7724a30de080e6f9f7826feab92d3e89020fff67440ec95f406abb6943a4774fe2eea59128a7af9a035943294fab02547887a0e569ed46dc570e3dfe1490b3d89e5152a992e3f263"}, 0xe8) openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.io_wait_time_recursive\x00', 0x0, 0x0) (async) r5 = openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.io_wait_time_recursive\x00', 0x0, 0x0) openat$cgroup_ro(r5, &(0x7f0000000180)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) (async) r6 = openat$cgroup_ro(r5, &(0x7f0000000180)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r7, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x100000b, 0x28011, r6, 0x0) r8 = bpf$ITER_CREATE(0x21, &(0x7f00000001c0)={r7}, 0x8) getsockopt$inet_IP_XFRM_POLICY(r1, 0x0, 0x11, &(0x7f00000002c0)={{{@in=@multicast1, @in6=@empty, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}}}, &(0x7f0000000240)=0xe8) r10 = gettid() r11 = bpf$PROG_LOAD(0x5, &(0x7f0000000080)={0x11, 0x8, 
&(0x7f0000003500)=ANY=[@ANYBLOB="620af8ffa1dc0021bfa100000000000007010000f8ffffffb702000007000000bd120000000000008500000010000000b70000000000000095000000000000003faf4f2aa3d9b18ed812a2e2c49e8020a6f4e0e4a9446c7670568982b4e020f698393aa0f3881f9c24561f1b2607995daa56f151905ea23c22624c9f87f9793f50bb546040677b0c5077da80fb982c1e9400e693146cea484a415b76966118b64b751a0f241b072e90080008002d75593a286cecc93e64c227c95aa0b784625704f07372c29184ff7f4a7c0000070000006056feb4cc664c0af9360a1f7a5e6b607130c89f18c0c1088d8b8588d72ec29c48b45e0000000000000401d01aa27ae8b09e00e79ab20b0b8ed8fb7a68af2ad0810000000000006fa03c6468978089b302d7ff6023cdcedb5e0125ebbcebdde510cb2364149215108337719acd97cfa107d40224edc5465ad32b77a74e802a0dc6bf25cca242bc6099ad2300000480006ef6c1ff0900000000000010c63a949e8b7955394ffaff03000000000000ab87b1bfeda7be586602d985430cea080000000000000026abfb0767192361448279b05d96a703a660581eecdbf5bcd3de227a167ca17a0faf60fd6ad9b97aa5fa68480366c9c6fd6fa5043aa3926b81e3b59c9b081d6a08000000ea2b1a52496dfcaf99431412fd134a996382a1a04d5bb924cfe5f3185418d605ffff9c4d2ec7c32f2095e63c80aff9fa740b6c7632d5933a1c1fa5605bd7603f2ba2a790d62d6faec2fed44da4928b30142ba1fde5c5d50b83bae645ffa4997da9c77af4c0cb97fca585ec6bf58351d578be00d952aab9c71764b0a8a7583c90b3433b809bdb9fbd48fc877505ebf6c9d13330ca006bce1a84521f14518c9b476fccbd6c712016219848624b87cec2dbe98223d8d9e86c5ea06d108d8f80a0eb4fa39f6b5c02e6d6d90756ff578f57000000009700cf0b4b8bc229413300000000000000000003000000000000000000000000001000000000559711e6e8fcffffffffffffffb2d02edc3e01dd271c896249ed85b980680b09000000000f0000169cdcacc413b48dafb7a2c8cb482bac0ac502d9ba96ffffffd897ef3b7cda42f83d53046da21b40216e14ba2d6af8656b01e17addaedab25b30002abbba7fa725f38400be7c1f001b2cd317902f19e385be9e48dccf1f9f3282830689da6b53b263339863297771d74732d400003341bf4a00fc9fec2271ff01589646efd1cf870cd7bb2366fde4a594290c405ff870ce5dfd3467decb05cfd9fcb32c8ed1dbd9f70a64c108285e71b5565b1768ee58969c41595229df17bcad70fb4021428ce970275d13b78249788f11f761038b75d4fe32b561d46ea3abe0fa7956488bef241875f3b4b6ab7929a57affe760e797724f4fce1093b62d7e8c7123d890decacec55bf404e4e1f74b7eed82571be54c72d978cf906df0042e36acd37d7f9e109f2c06f815312e0cfe222a06f56dd022c074eb8a322fb0bf47c0a8d154b405c37feaf3dd95f6ef2acd1fe582786105c70600000000000000b7561301bb997316dbf17866fb84d4173731efe895ff2e1c5560926e90109b598502d3e959efc71f665c542c9062ece84c99a061887a20639b41c8c12ee86c50804042b3eac1f870b136345cf67ca3fb5aac518a75f9e7d7101da841735e186c489b3a06fb99e0347f23a054de2f4d92d6bd72ee2c9fdc75aaaf1e3e483b4ad05573af403269b4a39ce40293947d9a631bcbf3583784acbda216550d7aec6b79e30cbd128f91e358c3b377327ac9ecc34f24c9ae153ec60ac0694da85bff9f5f4df90400000000000000d6b2c5ea1393fdf24285bf16b99c9cc0ad1857216f1a985f369191ae954febb3df464bfe0f7f3ee9afe7befb89d2777399f5874c553aeb3729cffe86e669261192899d4562db0e22d564ae09bb6d163118e401e024fd452277c3887d6116c6cc9d8046c216c1f895778cb26e22a2a998de44aeadea2a40da8daccf080842a486721737390cbf3a74cb2003016f154772f514216bdf57d2a40d40b51ab67903008485b3b8a8c9ae3d14f93100c2e0893862eef552fcde2981f48c482bde8a168c3f5db2fea6f26e4a4304e50c349f4f9ecee27defc93871c5f99a3594191e104d417e60fc3541a2c905a1a95e9571bf38ae1981c4238ecaee6f75cd0a6881bd1517a8250df98674152f94e32409e2a3bce109b6000000000000a1fec9000000d694210d7560eb92d6a97a27602b81f76386f1535bef1497f92186086e29c6bc5a1fad6ec9a31137ab79a404abde7750898b59270bb29b81367ac91bd627e87306703be8672d70d1ab57075228a9f46ed9bd1f00fb8191bbab2dc591dda61f0868afc4294859323e7a45319f18101288a0268893373750d1a8fe64680b0a3fc22dd704e4214d00000000d6c98cd1a9fbe1e7d58c08acaf30065b928a31d2eca55f74a23641f6
1f2d5b308cf0d031b0c7f0ce21d69993e9960ff5f76015e6009756237badf4e7965bbe2777e808fcba821a00e8c5c39609ff854356cb490000000000c1fee30a3f7a85d1b29e58c77685efc0ceb1c8e5729c66018d169fc03aa188546b3ad2a182068e1e3a0e2505bc7f41019645466ac96e0d0b3bc19faa5449209b085f3c334b47f067bbab40743b2a428f1da1f68df75cf43f8ecc8d3726602111b40e761fd21081920382f14d12ca3c471c784ae7da7eaa69eb7f7f80572fdd11bb1d070080fbc22bf73468788df51710eb0b428ee751c47d8e894f745a868404a0bf35f0121008b722b1eaa6aedfa1bf2e7ccb2d61d5d76331ff5e20fa26b8471d42645288d7226bbd9c9e9e1cc9eb3d541e407cc2dae5e690cd628ab84875f2c50ba830d3f474b079b407000000deff000040430a537a395dc73bda367bf12cb7d81691a5fe8c47be395656a297e9df0e71b967ce7daac4be290159f6bcd75f0dda9de5532e66ae9e48b0ed1254a81faae79b6af6fbb869604d51de44c4e0973171ad47d6c00ebc7603093f000000fdec743af930cd6db49a47613808bad959719c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f15d6533f78a1f4e2df4ca23d867693fd42de9b49a1b36d48a44ba6a4530e59bec53e876dc660dd63bed8d31c31c37a373d4efd89f0000377b1b1292a893a516dab183ee65744fb8fc4f9ce2242e0f0059161c5e0000000000000000000057d77480e0345effff6413258d1f6eb190aa28cbb4bafe34124172e436b176c7ed4b132fb805d5edd9d188daf28d89c014c3ecca10ae55704544673e1fa03b84f63e022fe755f4007a4a899eaf52c4f491d8e97c862e29e457060000007ac691faee1e0c8fe056a07474e6e5490a7d3c3402000000b60600d837c6befc63ddf2f594ad7cbc56a1e44d218c956a5392a995f1fae8e9f206efbb33854dc70104d74dc07748f9745cb796da2dfb714a0500000000000000faed94fc39acfb3fd25dfa8116a154cd1226e1bb72b59fed817072a0da60160761fd3dffda0f7c592eabd8ab68334d2a1693cb187539049e331272bf5135044df8161400211b8012b6eb1ed5656e83f65509bb4b323c5bd61bff949d3bade2f6ffda1360c2786e16937ab61d6dcafed319c716357d0885f9c6d1f442954c167dd9b4acd9468ce3674c82bbb2e31389179b025dbe063b7f906217b2cf8410c7023aa3e5cc3ba1000000000000000000000000000000006ae6301a2da44394275c582a6516bb92ea1980a0a659f2f1811c8b281c209647c4241f292b20508b215dde27bb2487a6e2b5e4a8ccfab90c23827ef06cbe364073005f8a6d1456aaeb85ffb7858f24eced67a67ab825e863928ed64c83f62ffdaa997657335b63c6b4163aff094059e626766845fd779c9e6cdbbd64c24936615ee68538e8fddd0d90f3a7579579a142c0f7b318264d5c13c31cf475829528267ead38523cab7e1664e8426ca85e82ccf821c8a02a7e7d954d05b68a9c28f79429b09e2bb3681ae2b831e27c735123361c193d66ed4d71f19b199d371ec6bfada7cd370e3fdd3cd980fa1e145fd3f3e96b1feb53c865e1ada08f5d16ed652ee0c7f45352222692fbd679212c225d097aa90f7e1fb1f983415f43e75a19ecf7fd21bfa150ef563aa72ba1c43c5f3d9be128ec26b691f31f9cab931631606a81622f120675c962be2d3b5e95f74f0b209e42e6bdd76e6e725295b1d78d928f6f63c41cbde2ba66ad81168070c8c6e18a6e452a31bdc4a60d637545ed4c8a1c649c3ce54ad3e16304d06a234f5f9311ef0f78924b68dbb4712efdb6974667bdb54f16fd2061b9ba93638dd177227e94e4ebd0ec1d437db948062bf41742000000000000000000305f70dd02fa0c61d5fe6d8ff35389246037e18d34c1375ae04f44f0c2543c772c5ccb137be7dc1874c514b37c668554d77d4ea5ed144a648257f4a0301067bbcd9b91072659d872f26b796e2b81025edb5f45f785e2c2602b248ecdd80f019ca659be7e8ae953325a27564f33c9d458a60be3dab38baab7eb1a66ab1ffd6308f7fd51beb356fe75eb985b7581bb5584c53984ba9c7340f97e8d3825681c53de5f554e595b00000000000000006a8fa9f05d64c4be42f981f00051a39938613067dbd1427e01bfec016e51844cefa8a855bf23ac887b4a88eed6d9443857242f28e31a41d20105fbf3394ff910e734b4d9101265ff729c426e01c1ab13dda8c388b9e6626f19eecb87e39175e85e17000000000000000000009431807e43886903526074e6b40244c938a4c68a38c25ddd7c143b3f14eafe4b28ec66815cf8d1f56aa1424bc9b5d58790298e5b310969e50c222563b54e60854e1bfeef448aca8c5ccbf5546ce4c3cd5a733fec25fb94e1e0f966bcbd28a4d8fe4f556eaa1104a793006619700798354c6ae05025040965e30
83562bfa20968c04007d21dc02c9fd1f75e1ff40f439bdde4e784012e52049b483f02f81b88f5f57816b3fecec79cfca8d37203e769759d6b6a56b7605ced8ee18475a77ff0963a565fb6021d216c01b1098e40550a1cfd80e9180100000000000000654cd76ca61fe5ad8a31ec558fdbfa706d5e738bceae81fe777c307d5bc72183a4c2d35732e74dd690c57bdfdc1f069f9491bca7a8c59363799be70018c25ece5ad7307dc7a95c51bc25a8bbe2cf5ddf6aa161693782b0e7feb8a768f391b49d4c978c96dbb52f21c122eba9f17c8bed10591958cf06321a248b5f76ceedfe0d080d6aeadc11b237b3326dd04b86ac37c0d131544888db9e128d059761ad9a393e96c3b41c13c5a381bff187a75de560ba6eb3faa5ff8d2bb3c88f8de5efc2fb2200cfda6d07ceae22577064334fbf76a23e62e6059211d995b879f6b7d3f7fcf03652b81e6b7cdeff947ad185d3c6269ca247b429c3b872a8f1ef60407d29a874f4ec31c9effed55543a65a6b4d778cebcd43b7905f3960140bd783540a7353014bda8e9c7a34a5f428fd1f8eb11e837dd9d586487fdebcb1ecd3a003ff0fda4be617fecf1ff0ef2cdfb7fea73ca18874664d60a4b9423f3297bc8eb91b4ee1d73272abbef3e7a828a7d7ab055a8eb58fe379de85338304e26e3620941b463e9049fd105c74c91cc4d71b0f76e2c2e4825106aa7ce2a3adbbc7a0443ece58e752b47e6f677ec97c5c568a89d6e36b165c39132a0f27080ece2a94c320b002c77f82662675a7713c7067081cac15994698c41ff4754268ae2676384ff799783f55d7e5a1a092a01b965dc99cb7a9d98440c355927629f2bcf9dc2396eb2f5d25829715b24327642ac48f1201014a95e0e65e12cdf27e19043e3c5d3e798375cead35b9a93190a52cdecaaccc854a1d41ef365303f0e9b4fc969c9dab6df5e8a795b140fcc09e8a7b694d12932917facd8ceaa4e2d0d16bb0b95387fcd5ff136d8a673e82e8019dffa3e89aba6755f3bfbddf94daf442bbff744591931872a36cf921ad69f2127386e8b0f9afee4da8d3fbec809fbb3ca0fded2859cf25d4c6155d396c5b9bd1a928923123f63f4c40688eae69990a94194562473867ebdb502e6c2d6938b9274b2d3648857f9a83ab5995d23bd840274764a00d97c64db9805f62e3ce4327cfed7a167ab78a666f960669b69ac9f70840cb95b6450d0437adf84c45aac026843d7db2286c482551978e4ea4eb3747e35e7bf40567003506b9a665291d75106fade70809d6329aa30adbe59fab0046b6a053cd6b4393883f7170901cf1c1c1c714a864329e73ce95c42865f3f1d6bbdfd6f57a04e06815993834764e2062813f38b109dccadfe967473f5dcc5c4a3549472873533cf75f202c3ee586bdf663de385708654be8950b2c6b159cfdebea7134f59bd799714221de0331f8da537ebf5aeadaf21efafbb184a056a16515f470de703db895381101880877339860b69b540a92613d84798793d370fc45891cee8a2e8bcf2f6d9b4b3918e770279f8a01eca72918039b86e69a9e3d873db27b3fddc7"], &(0x7f0000000100)='GPL\x00'}, 0x41) r12 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000000)={&(0x7f0000000080)='sched_switch\x00', r11}, 0x10) bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f00000003c0)={r10, r12, 0x0, 0xd, &(0x7f0000000380)='*,.\xb1/-[,{-\\\xdd\x00'}, 0x30) (async) bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f00000003c0)={r10, r12, 0x0, 0xd, &(0x7f0000000380)='*,.\xb1/-[,{-\\\xdd\x00'}, 0x30) sendmsg$nl_netfilter(r8, &(0x7f00000004c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000480)={&(0x7f00000003c0)={0xa8, 0x6, 0x7, 0x301, 0x70bd2a, 0x25dfdbfb, {0x2, 0x0, 0x6}, [@nested={0xc, 0x66, 0x0, 0x1, [@typed={0x8, 0x7, 0x0, 0x0, @pid}]}, @nested={0x55, 0x0, 0x0, 0x1, [@typed={0x8, 0x82, 0x0, 0x0, @uid=r9}, @typed={0x8, 0x91, 0x0, 0x0, @pid}, @generic="bee862edbe1b1f620e452a8dcd4d7c23e8c74fe3f0a1ff8cbc8c0e3f4764553d174ba913a554274d27012e22fee00cfd1a", @typed={0x8, 0x35, 0x0, 0x0, @pid=r10}, @typed={0x8, 0x61, 0x0, 0x0, @uid=0xffffffffffffffff}]}, @generic="5927c5107210bc161c85a0b737b16f7c1b1fd59524f821cf2a6f333dce772fb76fa12fdb", @nested={0xc, 0x20, 0x0, 0x1, [@typed={0x8, 0x87, 0x0, 0x0, @ipv4=@remote}]}]}, 0xa8}, 0x1, 0x0, 0x0, 0x40100b0}, 0x240000e4) (async) sendmsg$nl_netfilter(r8, &(0x7f00000004c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000480)={&(0x7f00000003c0)={0xa8, 0x6, 
0x7, 0x301, 0x70bd2a, 0x25dfdbfb, {0x2, 0x0, 0x6}, [@nested={0xc, 0x66, 0x0, 0x1, [@typed={0x8, 0x7, 0x0, 0x0, @pid}]}, @nested={0x55, 0x0, 0x0, 0x1, [@typed={0x8, 0x82, 0x0, 0x0, @uid=r9}, @typed={0x8, 0x91, 0x0, 0x0, @pid}, @generic="bee862edbe1b1f620e452a8dcd4d7c23e8c74fe3f0a1ff8cbc8c0e3f4764553d174ba913a554274d27012e22fee00cfd1a", @typed={0x8, 0x35, 0x0, 0x0, @pid=r10}, @typed={0x8, 0x61, 0x0, 0x0, @uid=0xffffffffffffffff}]}, @generic="5927c5107210bc161c85a0b737b16f7c1b1fd59524f821cf2a6f333dce772fb76fa12fdb", @nested={0xc, 0x20, 0x0, 0x1, [@typed={0x8, 0x87, 0x0, 0x0, @ipv4=@remote}]}]}, 0xa8}, 0x1, 0x0, 0x0, 0x40100b0}, 0x240000e4) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r14 = socket$inet6(0xa, 0x1, 0x84) bind$inet6(r14, &(0x7f0000001180)={0xa, 0x4e20}, 0x1c) connect$inet6(r14, &(0x7f00000010c0)={0xa, 0x4e20, 0x0, @loopback}, 0x1c) (async) connect$inet6(r14, &(0x7f00000010c0)={0xa, 0x4e20, 0x0, @loopback}, 0x1c) getsockopt$bt_hci(r14, 0x84, 0x11, &(0x7f0000002280)=""/4090, &(0x7f0000000040)=0xffa) r15 = socket$nl_netfilter(0x10, 0x3, 0xc) sendfile(r15, r13, 0x0, 0x100000002) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, r14, &(0x7f0000000000)={0x8}) mmap(&(0x7f00006af000/0x2000)=nil, 0x2000, 0x5000004, 0x12, r6, 0x1d4aa000) 19:46:42 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x5b8d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:42 executing program 3: syz_emit_ethernet(0x66, &(0x7f00000000c0)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b00000000000008e3ff0086dd080088be00000000100000000100000000000000080022eb000000002000000002000000000000000000000008006558000000005db7bf77179b330e7098fb3c0902dd3da00ec62e3b8c1684678279500c15391a2ee33f53cce9976aa9ad194bd17c849fd56a3d526bf310d013acf3b58573296a8f55cb3895ec30d1182d75a5a40a6696ea2e2f37135599ee6d394a87bbb3be1a797c2184950071ef45d13fc03f8077e328fc14163c8ea1442c231cfafd0d9fcaedadffec6404e60b7728726f1831b7f4a64d5ceb703989a27748e0cf2fdffa16541e08955682a924c139fc271b0e5238030cf91b202fa15717d63b405c0a5d3e"], 0x0) (async, rerun: 64) syz_emit_ethernet(0x1e, &(0x7f0000000040)=ANY=[@ANYBLOB="aaaaaaaaaa38ff010000000000000091bda5000000000000000000"], &(0x7f0000000080)={0x0, 0x1, [0xdbe, 0x66e, 0xde4, 0x4c2]}) (async, rerun: 64) write$tun(0xffffffffffffffff, &(0x7f0000000200)={@val={0x0, 0x1}, @void, @mpls={[{0x2900, 0x0, 0x1}, {0xffe01}, {0x86fe, 0x0, 0x1}, {0x8, 0x0, 0x1}, {0x1ff}, {0xc000}], @llc={@snap={0xab, 0xff, "02a6", "381129", 0xa00, "9425b4ab948f03299e84c12b61d3c7"}}}}, 0x34) (async) syz_emit_ethernet(0x1016, 
&(0x7f0000000880)=ANY=[@ANYBLOB="000000000000aaaaaaaaaa3491002d0081000e00886cff3704148a7e3f2d4b2a952b7e4df4f656c5b3f8f405a1359da0d9694fd27b1e9f43295edec04c4558fee08c030bea58a4b2677c7156285b8acbf8eeac5b1941386fc05e92a0222c03dfa4020338a758994f9a6e3eb6908ac43dcbc77cc351de9759ed7873ddf6dc99cb43c01f43c80bc865470ab36b35e7f2f20d962f5d67ac9e58910fa505e88b0c8f85eb3ca24b0d5c2b59f29c908a0b9d746a36faf151277b1af0fd759da8e061d78a1a053200343ac109381fc3405f8ded0c7941d0e3495c4c0acb1abfdbd2c1c9758ab5c54c31b6bb84749db5072766be1a13eda10cf6b38bc048b265a5996b2e34fdf926b741f351b5ecea7cf1ccb00dc21594fdcf523622fc0f520d31563460fa1a0465637230cb94a257192ad527436bf44eab1e4bd50187b736fb55dde1791d84dccaa3b3f3a412bb7ae1afa92c9a4cb556f22c12935cc733a66674a42b564562adcc1eada6e3de2d18027421688f80ba2abb4c12350ec963d508cfee86e20894f766fe0522765df5bc0fd3d9d83e0e54e627bbf4a2b2a2b2fdd8b0ba8bc2c4ec16afb3cf162756633be6c564d4dd5feaff6e0d202b9582168b7a4d54224867bc0b9b9af82d89e9f116e02a1814f5423a0c422ab963ad53718b82af92acefee99cc5633433610620aadb7653756f54633be2e9122ee89276f7d337d605cd9bd54be0b7e5c0b37cd3965696b35be803bd9957393b639541bd6f8ab6f8fd1412a06fb0ed77b9fc181da7cf011a50827288bfc09c0bcc649712f683129c948a526b9810c55fa2351ff5e294868196d96d636b1b1ded917a6870158c6f1e627da08d7d716799a3db8a4fd3dff15177f415808c0db061de5abf868ab572fbf4cfebaff071eb1a2d7e906bd69763d6c8e5f4c576773849d494bd43cb508542bd8b8cb1160d0805fa60a2f140825333b8584b170472ff0eb3ed785e9035e12c94378e06323a6c7a66fc503c1cc1c041192ed0fe3805d7932511b015dcf0dbdc4e358b5617dd44df21fabb706ddc16aa185ba861df73403d29cc720070f964272c6dce082d60a3ced4e487bc400083f1ad5c610b7460f33f976fb3543d70d49cc0d489b410b96a7ce77428d3a9a390bfc7dddbb6bd7df5834c8a98d9c9a58c3d36f7db2de51483b76774bbe0fe57059908763b9135a8e030886e49ab1c3cee2d448a193d05d68f3d22bcc92727d4993a1f4e9f09dd4a668afbd3ac7f70a6e5549777a5c5d6d234d36e97a4d5081d62c5702b7f209aa256a86a35fa03cb8be531fe6ace428220b2677415b074e1e65b181c6246d22048ce314bc6d3ed57b7dca5f91cbe75e3c65dd97cf1d049deb78807a58c4320a1e6fb8fcb43829d9c99b0a8af49b82064a7e639ca8af10bd6ca2e69503f2ac9b8939c8f0a5c15db4de8d7bf56449a69ccdaa031af9f22a5bedd4fb164f26c7dff1e32a6783d15e073e4d5c61623c821c52e108618260ed4c2402255ec24f82a884364f0b7bcab318bd4accb7ec6b7787c20b228e442ab33b53008f1ac8b5b1710de26b2aff9fbb3c4362d0b56944b7771fd9ea0e40541eff1a2e04a055837b2943f01e2c2746a28a1decd964e31ef0ecabab2107713f1444e71c145e71a00658662de014805baab682ab9f75fd29707316ff208ed8a2bdc98521d7c1b6bde395da01e43d1ec95d5f150f4bd95ccffb45387d2620cc606b7bd5909449a0ead3a7394383c243c885b0f51b37cc7813c33a29cf1290e6baf6924f070899ea41f3ea7ace1bade77decc6c6c1cdbe4ece040c6c09996ffad5d666da12c8017a15baedcbbe9642f17fdb2ebaff9d3f8f3089ea85b889992d86922b23435fef4f71200279cc135a7c84bebc59e1c609b677b1007cd2296eb632d090da27ec527272896ebcc5ddbe556431c3d1967256f6a937877b49d0ba727b64bb44827da57b7afcfe8752d361097598f50d63fb12e9f1c587c0eabe966eb4803dc428e957429e89c8cb04dfeab0fedbaf9caaf083db237f57a1a2a3575f3f6a8f26db3b27d8b63ccb021908a7727354809867f51f62498d40fecdd30f09b03bdaa9a9b03fc34c6cd0eba12de45a935776ea2ada0cc4fd4740cb877a6d3d9018078621723a846a1c6e5f9356b6ef86f6af50a327ee45cf6a15f5a0465d08eecb0a2b784294e3bbf008984f1d55dd38ab51e3a4715acd6173f753a855655ff61dc7a63a79f603c23aa75a3eb53dd2575f5cc72dce538ad2786a80dd68bc098d422b5e4077c9fc62133a3a45c2cf3014e7ae1e551d509c180f5619bfe0d7b180c01e98ccadc27cbe88296ba5b1538cd182a309a0e4dfe2fe964fe08fb355c2f7c1bcf732c1bd616c994abd0ff6396022cd3d8790e0d6c3b9264bd4ee922414643a2eaf2cc4b9388d948c3a0dcca3f71b7baef1f791a56c902d5ba5ed2849fb90726eae9c084aa3de73d0301cfafa56ac3b59c7830e945eb438
187b0da89e236fd83f93fe884badca55ae6dab9bfac8e7479bb226967063f5de326e80212e59ac507cd02864ddab28654416dadd6426654ee11977519f63df012985bf70e9b548b480e277ee1144a008ad3ad66b982ee4a612e2de07f209c7fc63776cce9bf8c9e9af64cfc6b9957dbcf2399c8a5039f65409ec2cfec598006f7f43d3b4bda4067ac2c1e75797655606da14af01d1578caf7b1e360c3cf206a90ee2f43ededa1ffca5e6689034510300101cf3e930da80b31b9ebe3c7734136d161ce77eac6c31b3a76573e253484c28e211b2ace88945270bc52282bb62701510d60ac73e1e5cd6bf306f9d0c20698eb4ae784f31be948580d2aaaef23c696533baefb268afd6e9ff82b1da5032e77c031c5bf7599dff1f75e98382646f2c22f7501103fb3933766e77ee4e6f26a0c0e14c762793a1ad2de9e3c548c72738686312eaeae7156240f78acab715ebcfafeccab3a4cb7d0dbe9eaabc84b30f8ce0d334b444a82946ec2d3455188b5d26355cf7cd23bd22ef783a538bbe0c88ff93b29380fa933e05007f812fc0e281880808cf612891f64f585e66324124f9d94d59836a1560c8745456b5982c0fff2dae034d973211353a7bc3d3b5bc1a06ca6e81ee85b0f3bd1cab125b158838da474da75bdfb6afc154e0d1fd82c37e6b58a341a2f1895d684964bfd872e5ff79eb9980b5875e5310a0ffe060154e800bb7b96e4461cd112980517db29367105ed5b42243b029138a7d14719063879d0a593351f1ae7f0bbdd72ecfa1ae3cbaf204b95d041db5fb16b5e168660894baff54fc65ce74bf2324141cdc85ec10eca59b0999800ce5409451bb087a985e15c924e72afa7351c73b564c519d3308317e653b4e7fd0af0c9071e29e54cbc57c803b58a3dc19fc7c6a736b4863b8686a34ee43aa8c71e420f88ce71af7d3ce24c182b28ec8404bdedaaacf3ad5554486c501868830ef0343456f873634bc8a36d7fb9025701b332bda1fe9e0939a6e5198c3bda81e78d5e9e91125983f5adb20384ac7907a0f9c5ab63b16a1b9551a29d57233b7f1a3c3e1ea17c482f5e4afd80622f97b75f46638720c46e648f049a0840cc7d5622d92a892dc9bef3cef0b9b23cbebd00ac25d96443e257ae472b6d0e9323b44039e960851e5e3f3f2236a9e2a07148f68cc5cd357ed7512d70415b4248e09abde1da67eb7aa94ee8634de4c550acec389337494177d503db954e991fc45280386f2ccefc75acb36d51f98785da15c8c6082cbe94006e0598da23aeb1d411ca41a04ad9e2a975c4f0eab7f90a39c5155947659fa82398471e4aee8baf3706f7c416c0bc8eb99497af659085313a5d5ba3d74972d8a319dbb8b4d48e75c0c5888f393d9561a52593d6b51a6f7502c755028180b14efe991a7c8003c5ca9415297f2a3a192a90e997dd3fa7a130223cbb62206048b563e6e7a1a22f4ae05b4264613a8059c72eed01a9b050b506af38eb55b13204fc53eeff605c428e6e3717246afc0711eea250f23fda8f122288c74dd358dfcff52b5a5bc65d723e38ac488c917c61f6c7456cdd5282685206bddfd4d0cab2ecfff08284782ed453d22092d1274b246ff122beee0a140f2f3ea2f971ab206fd8ac6f5296ce511d98a7c17fe61a9157de29e216f4762215d394019a7eb19da27d1b24673e23c1e97252bac36d2617ab7ed4b3e41fe0a42c3b1deda9b69f09334461f38eae6adff8b87b84ef4548bec81279f6cd0f18751cac823b5af34a2ef1043c9859bbdf5d4bbdbe594d601c20c3904a0d603887b9624fddf5bd01bd73d817c3d79486fd217813f1588abfca5254949fbdd75a337a558cbaa96da976247d3e0bb5cce63b40a56a413289bac2e31fc0a61ebd62e50e1e56130ab01ed564c1a275a00c6c87917d1c6945b446581f96ee71717389ba014af29068440febe7dff9278ba3d16c671c7092d33b35f7a9f54c1ac9af864378d8969080dda37d4d2498b3e38a6d3535711db2625749ab8490f4f6faae81b904fb0049290a6662d05e8d5d01334474b77d582c749c48508bf58df7b4734f9161b6bed30fa70bf05d60fce6b9c7cfa53f4d3c0f706de225bbc83fba0ea6ec0064577e82815da54d757d6c12e7fc65d94c13de70a405aca384f8d3adb26d4a96875655e7837ea6b781b07bc3d2448e8366450176e4ed81287e2066d1587f33fc9db6b94e5eb43e8703d53d37b989901f07f8db5d8021ecd2a3c9ded32159a3ab45b02a7d08878c3c53945ebf4f2929e2e8d3e8fa28342f490f8dfc435f37d99c3f32bd4e95f12676b8585c426859152bd95c3b4dd2dca53a6c31a728148d5d81bd1c132fdbbe90e729bee19ff9b91fe295f746cda7b548af845f133b04d5ba37b447939689a03a927b548610515664ef3b4333ac81a5abd2d4d725c2c9ee95c6ed098415fc496d131378e57e8a48eb6e75c588df75e0ce4724cd621fa5a7d1d3b45e8bdef8e20902859474af0ca5754d1e0d0f3ff2676846
9553974e29ec6297ef1fa7d01b22b73a4602140804f0811c8009a900bae094ce52f0852fa7f23522bfa162ff209b8c9a6bc2825a5006ea13e546223e3d940ced5f165f2404b892052bb48b16b8836c1bd3b7f6773cdd38f0f7036e83ca68a3ad0fe3ee6c2f87b0e35538b1deb739e193111741d3e7e6350b14533415c955bb88db5cf80a29b28c8f816616d4bb8c42d9a7aa3d6ce4fab89e37275d55a986717406fa54f04d6cae2df0fc0a417dc68abd6136d4b347716d5bb4d0b0d95ad0084a3969c5f793db00762a2dd3abe0818183b2cdfea1769b4b0ce35dac5741324117085f83af2e108c64470c5609bd196e0b7bbd71459a02dd0744d210ee36329d9d937a8b7f4d9e5e8a382e46284a68da389512a0a8817e612a15474c2c7c6373432fc3f9d87b5491f2c33edcb8a7b6d0f23f4fd73ee777a7647feba0d35d12b138629b7c91014c074e7b63d8b39be2e1726acb3937207a91ee290a252f1c5526f104b765b8d48773b0474ceae1f26120355d7ba7b0b0ae0b172866af96bc38525fc377f8aa432be2c2c1cda192ca07eccf5e0884a183c9d194885cc396f6a85ef693867d6ec688a60e5e51a465ed35c2c51855b99429775f96f1d9e63275ad0ff07b6d7a33fb110dac8e621f56da8146b0a285ac3e50a906e9c2557bb67895cbfdc559e6133ed42d0828f68d7815d2297ad5f9ff8d683df3225b8dda0dcf05c8e26a8ee8862b543a915ee868aa9e70d508e2299aecc5a27d89cd6bdad7ddf64baf1fbae56a8727355e23995d3875d98696cc1542841166c307b84c1e529965fe05bf916f8"], &(0x7f0000000000)={0x0, 0x4, [0x5f8, 0x429, 0xaa2, 0x612]}) [ 2608.771784][T26130] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2608.889118][T26130] bond1176: entered promiscuous mode [ 2608.898289][T26130] 8021q: adding VLAN 0 to HW filter on device bond1176 [ 2608.915160][T26123] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2608.945813][ T27] audit: type=1804 audit(1690919202.979:1865): pid=26128 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.2" name="/root/syzkaller-testdir1875037404/syzkaller.08FvcD/5611/cgroup.controllers" dev="sda1" ino=1951 res=1 errno=0 19:46:43 executing program 3: getsockopt$inet_tcp_int(0xffffffffffffffff, 0x6, 0x2, &(0x7f0000000100), &(0x7f0000000140)=0x4) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) syz_emit_ethernet(0x93, &(0x7f0000000000)={@local, @local, @void, {@ipv6={0x86dd, @generic={0x7, 0x6, '|[6', 0x5d, 0x3a, 0x1, @loopback, @mcast1, {[@fragment={0x2b, 0x0, 0x7f, 0x0, 0x0, 0x1c, 0x66}], "73bb6397edfbab4489faaa383c460acf7cfcd7ced41078d7b32aaca4f9405553807faee0eee5d118691f4bc34e4757bc8f58d6d7d374500d97a03224ee3650813f4e75f00471fc4376aaa4bd0a8e5521f2e2fa2a27"}}}}}, &(0x7f00000000c0)={0x1, 0x3, [0x3da, 0xda3, 0xc41, 0xa32]}) [ 2609.001296][ T27] audit: type=1804 audit(1690919202.979:1866): pid=26126 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.2" name="/root/syzkaller-testdir1875037404/syzkaller.08FvcD/5611/cgroup.controllers" dev="sda1" ino=1951 res=1 errno=0 19:46:43 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000280)='cgroup.controllers\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000280)='cgroup.controllers\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cpuset.effective_cpus\x00', 0x275a, 0x0) ioctl$FS_IOC_SETFLAGS(r0, 0x40086602, &(0x7f0000000100)) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000000080)={0x288000c, r0}) r2 = socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) r3 = 
syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) write$binfmt_misc(r2, &(0x7f0000000500)={'syz1', "4a7547aff0800d1fa75912a4dc682001e220889a5d16a1043e6b38b6b59622b53384762ba177dc8ebdd13f37c53db1e31c801e6a256c9ca3e9ffe2a3f4d1e9f48bdf5e95640125b1bc2e35860dc8c77195db9e5b8f58c741f51b97b614f9d8558e20af5c5c49c6a5407ae93fd808435118b4778b03294d51ed4af0b12388972e8def2a78241f9268e913a1caa3938a997f37a3b4ed07a6ebf9e34d3e7724a30de080e6f9f7826feab92d3e89020fff67440ec95f406abb6943a4774fe2eea59128a7af9a035943294fab02547887a0e569ed46dc570e3dfe1490b3d89e5152a992e3f263"}, 0xe8) openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.io_wait_time_recursive\x00', 0x0, 0x0) (async) r5 = openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.io_wait_time_recursive\x00', 0x0, 0x0) openat$cgroup_ro(r5, &(0x7f0000000180)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) (async) r6 = openat$cgroup_ro(r5, &(0x7f0000000180)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r7, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x100000b, 0x28011, r6, 0x0) r8 = bpf$ITER_CREATE(0x21, &(0x7f00000001c0)={r7}, 0x8) getsockopt$inet_IP_XFRM_POLICY(r1, 0x0, 0x11, &(0x7f00000002c0)={{{@in=@multicast1, @in6=@empty}}, {{@in=@empty}}}, &(0x7f0000000240)=0xe8) (async) getsockopt$inet_IP_XFRM_POLICY(r1, 0x0, 0x11, &(0x7f00000002c0)={{{@in=@multicast1, @in6=@empty, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}}}, &(0x7f0000000240)=0xe8) r10 = gettid() r11 = bpf$PROG_LOAD(0x5, &(0x7f0000000080)={0x11, 0x8, 
&(0x7f0000003500)=ANY=[@ANYBLOB="620af8ffa1dc0021bfa100000000000007010000f8ffffffb702000007000000bd120000000000008500000010000000b70000000000000095000000000000003faf4f2aa3d9b18ed812a2e2c49e8020a6f4e0e4a9446c7670568982b4e020f698393aa0f3881f9c24561f1b2607995daa56f151905ea23c22624c9f87f9793f50bb546040677b0c5077da80fb982c1e9400e693146cea484a415b76966118b64b751a0f241b072e90080008002d75593a286cecc93e64c227c95aa0b784625704f07372c29184ff7f4a7c0000070000006056feb4cc664c0af9360a1f7a5e6b607130c89f18c0c1088d8b8588d72ec29c48b45e0000000000000401d01aa27ae8b09e00e79ab20b0b8ed8fb7a68af2ad0810000000000006fa03c6468978089b302d7ff6023cdcedb5e0125ebbcebdde510cb2364149215108337719acd97cfa107d40224edc5465ad32b77a74e802a0dc6bf25cca242bc6099ad2300000480006ef6c1ff0900000000000010c63a949e8b7955394ffaff03000000000000ab87b1bfeda7be586602d985430cea080000000000000026abfb0767192361448279b05d96a703a660581eecdbf5bcd3de227a167ca17a0faf60fd6ad9b97aa5fa68480366c9c6fd6fa5043aa3926b81e3b59c9b081d6a08000000ea2b1a52496dfcaf99431412fd134a996382a1a04d5bb924cfe5f3185418d605ffff9c4d2ec7c32f2095e63c80aff9fa740b6c7632d5933a1c1fa5605bd7603f2ba2a790d62d6faec2fed44da4928b30142ba1fde5c5d50b83bae645ffa4997da9c77af4c0cb97fca585ec6bf58351d578be00d952aab9c71764b0a8a7583c90b3433b809bdb9fbd48fc877505ebf6c9d13330ca006bce1a84521f14518c9b476fccbd6c712016219848624b87cec2dbe98223d8d9e86c5ea06d108d8f80a0eb4fa39f6b5c02e6d6d90756ff578f57000000009700cf0b4b8bc229413300000000000000000003000000000000000000000000001000000000559711e6e8fcffffffffffffffb2d02edc3e01dd271c896249ed85b980680b09000000000f0000169cdcacc413b48dafb7a2c8cb482bac0ac502d9ba96ffffffd897ef3b7cda42f83d53046da21b40216e14ba2d6af8656b01e17addaedab25b30002abbba7fa725f38400be7c1f001b2cd317902f19e385be9e48dccf1f9f3282830689da6b53b263339863297771d74732d400003341bf4a00fc9fec2271ff01589646efd1cf870cd7bb2366fde4a594290c405ff870ce5dfd3467decb05cfd9fcb32c8ed1dbd9f70a64c108285e71b5565b1768ee58969c41595229df17bcad70fb4021428ce970275d13b78249788f11f761038b75d4fe32b561d46ea3abe0fa7956488bef241875f3b4b6ab7929a57affe760e797724f4fce1093b62d7e8c7123d890decacec55bf404e4e1f74b7eed82571be54c72d978cf906df0042e36acd37d7f9e109f2c06f815312e0cfe222a06f56dd022c074eb8a322fb0bf47c0a8d154b405c37feaf3dd95f6ef2acd1fe582786105c70600000000000000b7561301bb997316dbf17866fb84d4173731efe895ff2e1c5560926e90109b598502d3e959efc71f665c542c9062ece84c99a061887a20639b41c8c12ee86c50804042b3eac1f870b136345cf67ca3fb5aac518a75f9e7d7101da841735e186c489b3a06fb99e0347f23a054de2f4d92d6bd72ee2c9fdc75aaaf1e3e483b4ad05573af403269b4a39ce40293947d9a631bcbf3583784acbda216550d7aec6b79e30cbd128f91e358c3b377327ac9ecc34f24c9ae153ec60ac0694da85bff9f5f4df90400000000000000d6b2c5ea1393fdf24285bf16b99c9cc0ad1857216f1a985f369191ae954febb3df464bfe0f7f3ee9afe7befb89d2777399f5874c553aeb3729cffe86e669261192899d4562db0e22d564ae09bb6d163118e401e024fd452277c3887d6116c6cc9d8046c216c1f895778cb26e22a2a998de44aeadea2a40da8daccf080842a486721737390cbf3a74cb2003016f154772f514216bdf57d2a40d40b51ab67903008485b3b8a8c9ae3d14f93100c2e0893862eef552fcde2981f48c482bde8a168c3f5db2fea6f26e4a4304e50c349f4f9ecee27defc93871c5f99a3594191e104d417e60fc3541a2c905a1a95e9571bf38ae1981c4238ecaee6f75cd0a6881bd1517a8250df98674152f94e32409e2a3bce109b6000000000000a1fec9000000d694210d7560eb92d6a97a27602b81f76386f1535bef1497f92186086e29c6bc5a1fad6ec9a31137ab79a404abde7750898b59270bb29b81367ac91bd627e87306703be8672d70d1ab57075228a9f46ed9bd1f00fb8191bbab2dc591dda61f0868afc4294859323e7a45319f18101288a0268893373750d1a8fe64680b0a3fc22dd704e4214d00000000d6c98cd1a9fbe1e7d58c08acaf30065b928a31d2eca55f74a23641f6
1f2d5b308cf0d031b0c7f0ce21d69993e9960ff5f76015e6009756237badf4e7965bbe2777e808fcba821a00e8c5c39609ff854356cb490000000000c1fee30a3f7a85d1b29e58c77685efc0ceb1c8e5729c66018d169fc03aa188546b3ad2a182068e1e3a0e2505bc7f41019645466ac96e0d0b3bc19faa5449209b085f3c334b47f067bbab40743b2a428f1da1f68df75cf43f8ecc8d3726602111b40e761fd21081920382f14d12ca3c471c784ae7da7eaa69eb7f7f80572fdd11bb1d070080fbc22bf73468788df51710eb0b428ee751c47d8e894f745a868404a0bf35f0121008b722b1eaa6aedfa1bf2e7ccb2d61d5d76331ff5e20fa26b8471d42645288d7226bbd9c9e9e1cc9eb3d541e407cc2dae5e690cd628ab84875f2c50ba830d3f474b079b407000000deff000040430a537a395dc73bda367bf12cb7d81691a5fe8c47be395656a297e9df0e71b967ce7daac4be290159f6bcd75f0dda9de5532e66ae9e48b0ed1254a81faae79b6af6fbb869604d51de44c4e0973171ad47d6c00ebc7603093f000000fdec743af930cd6db49a47613808bad959719c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f15d6533f78a1f4e2df4ca23d867693fd42de9b49a1b36d48a44ba6a4530e59bec53e876dc660dd63bed8d31c31c37a373d4efd89f0000377b1b1292a893a516dab183ee65744fb8fc4f9ce2242e0f0059161c5e0000000000000000000057d77480e0345effff6413258d1f6eb190aa28cbb4bafe34124172e436b176c7ed4b132fb805d5edd9d188daf28d89c014c3ecca10ae55704544673e1fa03b84f63e022fe755f4007a4a899eaf52c4f491d8e97c862e29e457060000007ac691faee1e0c8fe056a07474e6e5490a7d3c3402000000b60600d837c6befc63ddf2f594ad7cbc56a1e44d218c956a5392a995f1fae8e9f206efbb33854dc70104d74dc07748f9745cb796da2dfb714a0500000000000000faed94fc39acfb3fd25dfa8116a154cd1226e1bb72b59fed817072a0da60160761fd3dffda0f7c592eabd8ab68334d2a1693cb187539049e331272bf5135044df8161400211b8012b6eb1ed5656e83f65509bb4b323c5bd61bff949d3bade2f6ffda1360c2786e16937ab61d6dcafed319c716357d0885f9c6d1f442954c167dd9b4acd9468ce3674c82bbb2e31389179b025dbe063b7f906217b2cf8410c7023aa3e5cc3ba1000000000000000000000000000000006ae6301a2da44394275c582a6516bb92ea1980a0a659f2f1811c8b281c209647c4241f292b20508b215dde27bb2487a6e2b5e4a8ccfab90c23827ef06cbe364073005f8a6d1456aaeb85ffb7858f24eced67a67ab825e863928ed64c83f62ffdaa997657335b63c6b4163aff094059e626766845fd779c9e6cdbbd64c24936615ee68538e8fddd0d90f3a7579579a142c0f7b318264d5c13c31cf475829528267ead38523cab7e1664e8426ca85e82ccf821c8a02a7e7d954d05b68a9c28f79429b09e2bb3681ae2b831e27c735123361c193d66ed4d71f19b199d371ec6bfada7cd370e3fdd3cd980fa1e145fd3f3e96b1feb53c865e1ada08f5d16ed652ee0c7f45352222692fbd679212c225d097aa90f7e1fb1f983415f43e75a19ecf7fd21bfa150ef563aa72ba1c43c5f3d9be128ec26b691f31f9cab931631606a81622f120675c962be2d3b5e95f74f0b209e42e6bdd76e6e725295b1d78d928f6f63c41cbde2ba66ad81168070c8c6e18a6e452a31bdc4a60d637545ed4c8a1c649c3ce54ad3e16304d06a234f5f9311ef0f78924b68dbb4712efdb6974667bdb54f16fd2061b9ba93638dd177227e94e4ebd0ec1d437db948062bf41742000000000000000000305f70dd02fa0c61d5fe6d8ff35389246037e18d34c1375ae04f44f0c2543c772c5ccb137be7dc1874c514b37c668554d77d4ea5ed144a648257f4a0301067bbcd9b91072659d872f26b796e2b81025edb5f45f785e2c2602b248ecdd80f019ca659be7e8ae953325a27564f33c9d458a60be3dab38baab7eb1a66ab1ffd6308f7fd51beb356fe75eb985b7581bb5584c53984ba9c7340f97e8d3825681c53de5f554e595b00000000000000006a8fa9f05d64c4be42f981f00051a39938613067dbd1427e01bfec016e51844cefa8a855bf23ac887b4a88eed6d9443857242f28e31a41d20105fbf3394ff910e734b4d9101265ff729c426e01c1ab13dda8c388b9e6626f19eecb87e39175e85e17000000000000000000009431807e43886903526074e6b40244c938a4c68a38c25ddd7c143b3f14eafe4b28ec66815cf8d1f56aa1424bc9b5d58790298e5b310969e50c222563b54e60854e1bfeef448aca8c5ccbf5546ce4c3cd5a733fec25fb94e1e0f966bcbd28a4d8fe4f556eaa1104a793006619700798354c6ae05025040965e30
83562bfa20968c04007d21dc02c9fd1f75e1ff40f439bdde4e784012e52049b483f02f81b88f5f57816b3fecec79cfca8d37203e769759d6b6a56b7605ced8ee18475a77ff0963a565fb6021d216c01b1098e40550a1cfd80e9180100000000000000654cd76ca61fe5ad8a31ec558fdbfa706d5e738bceae81fe777c307d5bc72183a4c2d35732e74dd690c57bdfdc1f069f9491bca7a8c59363799be70018c25ece5ad7307dc7a95c51bc25a8bbe2cf5ddf6aa161693782b0e7feb8a768f391b49d4c978c96dbb52f21c122eba9f17c8bed10591958cf06321a248b5f76ceedfe0d080d6aeadc11b237b3326dd04b86ac37c0d131544888db9e128d059761ad9a393e96c3b41c13c5a381bff187a75de560ba6eb3faa5ff8d2bb3c88f8de5efc2fb2200cfda6d07ceae22577064334fbf76a23e62e6059211d995b879f6b7d3f7fcf03652b81e6b7cdeff947ad185d3c6269ca247b429c3b872a8f1ef60407d29a874f4ec31c9effed55543a65a6b4d778cebcd43b7905f3960140bd783540a7353014bda8e9c7a34a5f428fd1f8eb11e837dd9d586487fdebcb1ecd3a003ff0fda4be617fecf1ff0ef2cdfb7fea73ca18874664d60a4b9423f3297bc8eb91b4ee1d73272abbef3e7a828a7d7ab055a8eb58fe379de85338304e26e3620941b463e9049fd105c74c91cc4d71b0f76e2c2e4825106aa7ce2a3adbbc7a0443ece58e752b47e6f677ec97c5c568a89d6e36b165c39132a0f27080ece2a94c320b002c77f82662675a7713c7067081cac15994698c41ff4754268ae2676384ff799783f55d7e5a1a092a01b965dc99cb7a9d98440c355927629f2bcf9dc2396eb2f5d25829715b24327642ac48f1201014a95e0e65e12cdf27e19043e3c5d3e798375cead35b9a93190a52cdecaaccc854a1d41ef365303f0e9b4fc969c9dab6df5e8a795b140fcc09e8a7b694d12932917facd8ceaa4e2d0d16bb0b95387fcd5ff136d8a673e82e8019dffa3e89aba6755f3bfbddf94daf442bbff744591931872a36cf921ad69f2127386e8b0f9afee4da8d3fbec809fbb3ca0fded2859cf25d4c6155d396c5b9bd1a928923123f63f4c40688eae69990a94194562473867ebdb502e6c2d6938b9274b2d3648857f9a83ab5995d23bd840274764a00d97c64db9805f62e3ce4327cfed7a167ab78a666f960669b69ac9f70840cb95b6450d0437adf84c45aac026843d7db2286c482551978e4ea4eb3747e35e7bf40567003506b9a665291d75106fade70809d6329aa30adbe59fab0046b6a053cd6b4393883f7170901cf1c1c1c714a864329e73ce95c42865f3f1d6bbdfd6f57a04e06815993834764e2062813f38b109dccadfe967473f5dcc5c4a3549472873533cf75f202c3ee586bdf663de385708654be8950b2c6b159cfdebea7134f59bd799714221de0331f8da537ebf5aeadaf21efafbb184a056a16515f470de703db895381101880877339860b69b540a92613d84798793d370fc45891cee8a2e8bcf2f6d9b4b3918e770279f8a01eca72918039b86e69a9e3d873db27b3fddc7"], &(0x7f0000000100)='GPL\x00'}, 0x41) r12 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000000)={&(0x7f0000000080)='sched_switch\x00', r11}, 0x10) bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f00000003c0)={r10, r12, 0x0, 0xd, &(0x7f0000000380)='*,.\xb1/-[,{-\\\xdd\x00'}, 0x30) (async) bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f00000003c0)={r10, r12, 0x0, 0xd, &(0x7f0000000380)='*,.\xb1/-[,{-\\\xdd\x00'}, 0x30) sendmsg$nl_netfilter(r8, &(0x7f00000004c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000480)={&(0x7f00000003c0)={0xa8, 0x6, 0x7, 0x301, 0x70bd2a, 0x25dfdbfb, {0x2, 0x0, 0x6}, [@nested={0xc, 0x66, 0x0, 0x1, [@typed={0x8, 0x7, 0x0, 0x0, @pid}]}, @nested={0x55, 0x0, 0x0, 0x1, [@typed={0x8, 0x82, 0x0, 0x0, @uid=r9}, @typed={0x8, 0x91, 0x0, 0x0, @pid}, @generic="bee862edbe1b1f620e452a8dcd4d7c23e8c74fe3f0a1ff8cbc8c0e3f4764553d174ba913a554274d27012e22fee00cfd1a", @typed={0x8, 0x35, 0x0, 0x0, @pid=r10}, @typed={0x8, 0x61, 0x0, 0x0, @uid=0xffffffffffffffff}]}, @generic="5927c5107210bc161c85a0b737b16f7c1b1fd59524f821cf2a6f333dce772fb76fa12fdb", @nested={0xc, 0x20, 0x0, 0x1, [@typed={0x8, 0x87, 0x0, 0x0, @ipv4=@remote}]}]}, 0xa8}, 0x1, 0x0, 0x0, 0x40100b0}, 0x240000e4) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r14 = socket$inet6(0xa, 0x1, 0x84) 
bind$inet6(r14, &(0x7f0000001180)={0xa, 0x4e20}, 0x1c) (async) bind$inet6(r14, &(0x7f0000001180)={0xa, 0x4e20}, 0x1c) connect$inet6(r14, &(0x7f00000010c0)={0xa, 0x4e20, 0x0, @loopback}, 0x1c) getsockopt$bt_hci(r14, 0x84, 0x11, &(0x7f0000002280)=""/4090, &(0x7f0000000040)=0xffa) (async) getsockopt$bt_hci(r14, 0x84, 0x11, &(0x7f0000002280)=""/4090, &(0x7f0000000040)=0xffa) r15 = socket$nl_netfilter(0x10, 0x3, 0xc) sendfile(r15, r13, 0x0, 0x100000002) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, r14, &(0x7f0000000000)={0x8}) mmap(&(0x7f00006af000/0x2000)=nil, 0x2000, 0x5000004, 0x12, r6, 0x1d4aa000) 19:46:43 executing program 3: getsockopt$inet_tcp_int(0xffffffffffffffff, 0x6, 0x2, &(0x7f0000000100), &(0x7f0000000140)=0x4) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) syz_emit_ethernet(0x93, &(0x7f0000000000)={@local, @local, @void, {@ipv6={0x86dd, @generic={0x7, 0x6, '|[6', 0x5d, 0x3a, 0x1, @loopback, @mcast1, {[@fragment={0x2b, 0x0, 0x7f, 0x0, 0x0, 0x1c, 0x66}], "73bb6397edfbab4489faaa383c460acf7cfcd7ced41078d7b32aaca4f9405553807faee0eee5d118691f4bc34e4757bc8f58d6d7d374500d97a03224ee3650813f4e75f00471fc4376aaa4bd0a8e5521f2e2fa2a27"}}}}}, &(0x7f00000000c0)={0x1, 0x3, [0x3da, 0xda3, 0xc41, 0xa32]}) [ 2609.245527][T26123] bond1215: entered promiscuous mode [ 2609.264907][T26123] 8021q: adding VLAN 0 to HW filter on device bond1215 [ 2609.308683][T26131] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:46:43 executing program 3: getsockopt$inet_tcp_int(0xffffffffffffffff, 0x6, 0x2, &(0x7f0000000100), &(0x7f0000000140)=0x4) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) syz_emit_ethernet(0x93, &(0x7f0000000000)={@local, @local, @void, {@ipv6={0x86dd, @generic={0x7, 0x6, '|[6', 0x5d, 0x3a, 0x1, @loopback, @mcast1, {[@fragment={0x2b, 0x0, 0x7f, 0x0, 0x0, 0x1c, 0x66}], "73bb6397edfbab4489faaa383c460acf7cfcd7ced41078d7b32aaca4f9405553807faee0eee5d118691f4bc34e4757bc8f58d6d7d374500d97a03224ee3650813f4e75f00471fc4376aaa4bd0a8e5521f2e2fa2a27"}}}}}, &(0x7f00000000c0)={0x1, 0x3, [0x3da, 0xda3, 0xc41, 0xa32]}) [ 2609.462547][T26131] bond1101: entered promiscuous mode 19:46:43 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x20000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2609.498173][T26131] 8021q: adding VLAN 0 to HW filter on device bond1101 [ 2609.735860][T26134] bond1176: (slave bridge1079): making interface the new active one [ 2609.758915][T26134] bridge1079: entered promiscuous 
mode [ 2609.790296][T26134] bond1176: (slave bridge1079): Enslaving as an active interface with an up link [ 2609.971187][T26135] bond1215: (slave bridge1145): making interface the new active one [ 2609.980519][T26135] bridge1145: entered promiscuous mode [ 2610.008879][T26135] bond1215: (slave bridge1145): Enslaving as an active interface with an up link [ 2610.067398][ T27] audit: type=1800 audit(1690919204.099:1867): pid=26147 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=collect_data cause=failed(directio) comm="syz-executor.0" name="cgroup.controllers" dev="sda1" ino=1957 res=0 errno=0 [ 2610.169060][T26138] bond1101: (slave bridge1031): making interface the new active one [ 2610.189230][T26138] bridge1031: entered promiscuous mode [ 2610.217660][T26138] bond1101: (slave bridge1031): Enslaving as an active interface with an up link [ 2610.281887][T26170] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2610.376716][T26170] bond67: entered promiscuous mode [ 2610.383089][T26170] 8021q: adding VLAN 0 to HW filter on device bond67 [ 2610.442914][T26171] bond67: (slave bridge63): making interface the new active one [ 2610.451578][T26171] bridge63: entered promiscuous mode [ 2610.466439][T26171] bond67: (slave bridge63): Enslaving as an active interface with an up link 19:46:44 executing program 0: bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x24, &(0x7f0000000180)='/proc/sys/net/ipv4/vs/sync_qlen_max\x00'}, 0x30) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000500)='freezer.self_freezing\x00', 0x275a, 0x0) r1 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$NL80211_CMD_TRIGGER_SCAN(r1, &(0x7f0000000100)={&(0x7f0000000040), 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x40, 0x0, 0x20, 0x70bd25, 0x25dfdbfc, {{}, {@void, @val={0xc, 0x99, {0x5, 0x33}}}}, [@NL80211_ATTR_SCHED_SCAN_MULTI={0x4}, @NL80211_ATTR_BSSID={0xa, 0xf5, @random="03dad14e7f0a"}, @NL80211_ATTR_SCHED_SCAN_MATCH={0x10, 0x84, 0x0, 0x1, [@NL80211_SCHED_SCAN_MATCH_ATTR_SSID={0xa, 0x1, @default_ap_ssid}]}]}, 0x40}}, 0x4000) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0xfea7) mmap(&(0x7f0000000000/0x3000)=nil, 0x3000, 0x1, 0x10012, r0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r2, 0x0, 0x0) 19:46:44 executing program 3: syz_emit_ethernet(0x2e4, &(0x7f0000000200)={@broadcast, @empty, @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x2d6, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x880b, 0x221, 0x0, [], 
"3488d1f933d839a506b26f6661b6a9c77b71e07542794b9c0c12c3344783a3c0de3e30ac38c91e3a8731cddafa4cd27ea453ad5d096d6f0b06869f304dc2aa43272f6868e681cc3f04bfb4895c6beca76ba540d848562bba22e4a19d6c55b082dbdc6547c913af9de233ed818b854815bf742480fa582ad908344254b0dedaa23a8bd0830f999d3e12c2b8a7f8f03a102d2b357fb6f26e4e1191c4fc249a2705c154e7d4a9f7def6e29788cc56ce452f50565e07a90b02709cc7839ee413e67933cb8aa6adfd4f916c11d4aaa6cea7a907d88a55ed4aed0ba5c2f3e437197eccc11a4662f5f3fc8d77b2bc19a7e2e4803ebf1c730868f4286bdb1f21b7cb2dc267712e6f389b06b2bd89910404ca1e8026bdf416f7c5d22770324406002ff1bbe007e909aca81097f24bff098105a67e2f7f3ca72ddcb84e5df1d89f084f9886f98b7eebf8c1bc59a0b454cd4c2a8e90daddbc277f26bd47302b1f2718471113ba1130fd582ac43ed60d7f6aa387f563b35f2f8c57ab6c88d398effbb5b2b6c720916b3e7d3ffb4978da85b55e468ae4060f160b6e2f8416a21c7984149951a00ee5d6039a24e32536a3e3e914ebb439946279f7d370ad5c28cc1cfa9436f0644fe5acb15a3acd8b55d4ec0eb7af6681a1ba6133d76e4703d779ebb03f7f87c17a4981fce3194649af61139d4a5e189e2110c522371e42697095f65e2ba304075f758be2bc58f7bad4d7f5e48b7fe26c4e22689ecfcba6069d6c5c9b686863e1cc"}, {}, {}, {}, {}, {0x8, 0x6558, 0x3, "c199c0ca2e3f11671ef96fcc0fba6530c4fb25aef2158d8cdf0b7c8ba38960cab43ac9d3e454bc64852598a1fef63e2c45e37801ce6007a1cef34f84e3509447c97b4b29888138d1432601edde678403e87711d7900fd899342b46245d"}}}}}}, 0x0) 19:46:44 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x32010000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:44 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2f010000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:44 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x5c8d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:44 executing program 2: sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000700)=ANY=[@ANYBLOB="6c0000001000010400"/20, @ANYRES8, @ANYBLOB="0526060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f1900000600c9d60000000008000a00a8", @ANYRESOCT=0x0, @ANYBLOB="2ccd13158772c364a7cf17d6c911a05a0b353e91f169d98ade4cead5a8fc78af5f2bc3200eeb31834404066dc3251e18e45a0ffa71d2ef3ce9cd20f72e625a59a9ba302b0b086782e0096356df1df5416cb10f19610f5a81362b100706e14f758a31dd473378c8a20cb3c6c191e0700d404ecfca4ce3773cb92f16c3841c479bee5effd25e37e3429c02ab2e38eee7b0fe16029e902cbd435d2ac45ed8d6207edbf5062bc7945ec7a1221176f4aaae644c0664b57f3b458c31e2b81dc1ab9beb7c24ebe767e04e8a6c5d2b888dc9f1c5c0e2c8e548baf275e0f53cb330cb9ca107215c9857d56cbe9f0b08d4b3f50e4993dd43afa2fd9e01b7e324ce9c4cfdbb62421108a9f7c7ddc4c3241a0f1698f4d0a114c305077fe20b01e1bcb23132a23d2cbdd8ef080addc94984a08b3f1f2de3e4a1efc186c8c04116443d3301d7023465797a1fde6f9326e9b0c4ecff4b4ee58919c5900c2d193f0f3fe964a8e467d648506ba8b9d79c7751c2c61bffb9a503abe13aa37c3f6de2879c05999a24c226f5c55bcd7631c92a041fc8e60db47529dd8978f77dc94c2cafa375d1cb78803035b9b78b97d3a83d417803d6fd7a2e5e5bdcff234a044980ce619b7804aa15a95ad0c4f5eaf7aed0d55907ffb0502d1424d485eac02477d917608001146ad62ead28a43d56f7f08a71a91451821d4533ea14c0beb47d9466f2ffcc4a5b84dab695a4a03d79b8736217419fc4b6912952e7b14bd8946f5da70a466166ef0afd8a21db37c9aaddfadc7136efe216e44a23952b39fedc796f2bb06c6048ed133d6887df953a96abc3ead5997ed442a0f9bed1f3db599c1fcf02d88bd16cfb85771bf99ea0375471d5e957e508fb0502ef9c14da41d48144b6e961266bc394af18d0c7cbf025d5008472e8f4602bb8b2081d35ef362ebdb21a964afb4d2a40675d30590994805f00aa5690068393c70ce532404e1ac7e61909d44e487ee4267205603ca20f78ebec10fc468ff27ce912eb6479241ca76d26b1dde798de23464b3f8c13f732bcce03c07a8546986ee9c468ec314cb9d0fa9a541b4fcf81eb7200f83a3a16db551d7c1eb77768f477f8bfd0e11a4a24dc8205ea561a34034a60a10d4e3dcf192b271edbbbd292d32a57b4cadf520623f1bb20bdef192467bb7eb8eb6c802d830e6a9b187a338420555636a61bac368193d99862a60ea336cbd1fd023ccfd99dd22079e49226f39be804ef551930bca3a7b7367dd6ca1398392ea7401b5aedc94adf5f0dff6d02b2abe2c293fc72b68d76ee95429ab626286102bae76d96b00e149d50d804460ee74d768e4ce6491f9aa0ae783fb9957a6a2b772ee22e82abf6d49e82905a9714647e9dcb75b04176468bb758cf97994c298ca0ab38e029d4f192d93650b5fdaf7fecd8bc741d158ceeb0861b306f1efa1864e769f59ea5e9d851a4a3d8959453a67e65a15f54173939255966fced1812e0f8220e692ab1de400ed331d6c0b02234b39b8eb53123c3b9da4f9ff8f31b5393e8942ee37808f6437ae8acab2e992e76e9d8c4ecd68040ec378bff010a7900cf7bda31ed3bbf4c74fb4c4cfb79e8ad68ec5cfb50eaee1bb232427d20eed887598af2027a76f467854e84b113ee0dbaf6af1d79cad64794eba6c34e8bbd6cce4aa0be202b1adb08f02421b180173435fa6a02fc79b8617109ab346c52b18f187be3d2eeaa4585e957ee118361979e9d756f2e67cbf708c47f7819556bf18103d8f92eb69c3d7c329c1c8c3bcab78cf977e23ea8c86ecf5f03af45f3cd9bb14bdac05336edce26aa0d4eb13071c076a18ddf71cc9c35ecc0b0be6aad7826de1337ccf16b019cf726b124126963437c0a8eca0a0d72e24fce86ae34840fbd588f467affda1bc929568088de10197d3ab3c17cadfc1a15c4c1c73d153dc7fafe1a19e9ead1fe26ec5c9509b3aab679736d78c83baa130786e328f70036a316aeebf8c88e6a4a8eb90746da501e38f281acb2b13fbce1cc7c1858406f42a12c137
476756709e54a03caa25642dbc8be428c23a1d7643cd7515d9cc9bc84588fba6b00cc6805384992ff442a2419ff79a42a0080d32707da0128910e248e5a2caa36d6a4176f8325e5d20f5fd82754d9f333aebbbbc1b20ff409a88ab2ced681deb45b7a9c25165e1ede74a0ff1ef814a99898504e9f1d36edc14abbc7f1e9063ae1cd050e54fa1fe0e3954f40fa070f3b4391d597661c43133db2687ce101fd90a6972e7542ed9bb641615fe5853a0c6dd87b1e3773f73156180706a30e8038785218a9e6a29ac0ff9c136be1edd35c4da4997f8a4925165f2581ef70b097cf8b53659ad6a4d5c5e6c7f2fc28d7f65338e29f9357439c6e5173e2967948125445149546df2334e9c3c09340ed1a713b5b895e4e3953e0668bf0022e5643e642bf1207fed3c49ffb24c558e816b8fe413c22575dc8b8ef2664453187a74408d700feb5f2824d9efc41877716c0387d67e7a0e6f41de145cab9d178d2ae63e5c0a73ed46cdbe6db8e17e52dca3c32e50bb7d91f5d7615de78d02889a90d52cf872c257c6b3ef9e5a22044af5c3e298888a4b050fb6cd94b03e4605cafa5abec8813133dc08b33a49875af593ea447998df60888d6d73eb0bdfbbaf0a753b220eb584798cf5424411edb6366b83615cef6da0631897583d88781b05a7d185914b26f85718aea8be61c3c114a9458d4f414d91a1d70a7498624946d75278137954fe37e5d9e6e730dc5c4d1fcc0a793c64c0807914243fa39f8ab8afea87db514705674724820387accc221fab6d90dbd59b037da43a8e6cc26df4cbc591fbaceac2d34ebe5c81247a92eab1f82a5e0378984b2b0f5af63191b149ac8e658626590dc5d154dfe032c48955c27baae7622284dcbe0abd117be2cebb3afea4e94c19f76b8a58762ae5e71771452aec20dcb74b22da54f9346d3e7cbb253d4eb985532bf7b55686aefff30d5f3c035d6f984dfbcc1cef8813d4298a21d3a0c0d7ac87742528225fcee9ccf9c705af0c54bc7ca84f098e76f715b2fe0261f2da9201b09d7a70b40dd70a2b96b16009a7820694ff5015a08f106cff95aef79c7755ad184bb714993b295e53d65f5cd5aacbb4302285f9c941266a463ddcbd8a0ce27cb3e9e20b078e5debb3895bef6df9651116214eff3fd92454ed9f243106a6670dc48ad16d2b1e77dacb2edd149e430160e068fccc843296481981c104298cab7347d9e19f99018e44ddcdba7dab500c99dc0193aac4ea1de69b839988b7efe94b9e23a55c9b3707113e2f2449cc6a24dd6fec2cd156e4052d5c3b6a99bfaf2cae4cc736820279c3142156e42c8140c68003dd3a570724aafbab9014dc56e98bbba2b1e902eaadf5ccb5a7ed0e8763ef2857f5161b0f83bda666cf72619684295d3f3ef005bd6e054137c738f825e5631a8c0ba28a2771a29738719c7c5a54664a87d6233c1e53c969f44a9b42e1e0e0ee7c88c434720d16353e1f7569c200dcca69c49c02ef0b8548580c743830fb59d1da544d0a926fbbd663ce8ef3c0f39aa0380e7867ade8878628beb3cd7693f5a85b663cb8679fa6ea2402062f9ca1cc565f184ec6a37b6372a456a19d0e92b91033d852f5b3c8a51dcf867d950586a1539f3358c82cc871bab2bd9334e648bbbed69191498719a3c97f32faf9dbb8071f90333aeb3dd8187fb036ed62b41db2a53687f235dcf8bb64c57af3628207772f8dc9ae9f26e7676f85016290873e5bbe308f7fe3cffc29e89b70e2dba370bff92fb38f6e913a79a4cde814ad4db750e0986219550ee1db33bde2ec7636df7ac64673b0f900d67195a497adafd9c9fca44653f312b42237f2f4162e3c03b98d7862a1a0f69430e6d33ef515246626cd4972a8ce1a79d62148365a6e7712d2fd1a7f54c4f1087969e76d152e88ab42fa2f9f982428f6ce45871bd7a65034dc310df4027ac5e390fe0ac0836edbda58560c88d5121807d9d5b9a071142b99f10f7cfa25d35a7411b458972b75eb9e6aa7c784ee328891412d5f59d6eea9eb4d567049b7febc680ef47ef84c57894ced18e7789cffbf1dd4202201813b06ff6f80fa7f18278b375b3d89327fe8aeb29ea143212164738934ddf2f31e8a1a806089e9d1e5dc0b2ac70d5817cfcabf35d84e1fe55c94d6529657c5d03871d44c7a65ada7cee41bded205fc9a921f6db70d6f7ea380dab82dabd22d25c963f48955859f662f56ddb0b9fefdaad114a5aff2d1d57d78bcb07a41e75da2a156abb89f740c8872ae8b887f27590f7f42945c85fd891611defc72ab00a259099188dd019d8205c2c4327cb03839c1a72a883125babd96c846fcd2f9c62c1a45ed20eca722bfc124e7f1239d122dc624b94516e3c2e9c1472075ca3ff81817161f36df290adfd60d45e07128d7460e0da4653dd0f5a1d67660565c1ee91870fe7be42b0943780fc753a32a8146c478b31e49a9a87fc4d2d21deb53ec690d1fb014ee3563e2408679f9913bfaff80a0965370633c2f608b733f531
4ce907c9b02b9a6673ce7ee431037d2be6b16180ccd9b25f1ff1f9c35a9f4c1149a36cb679e04a1752e2cfd48521ff1af851b572e8869eb32d513b61ed61326d9daa29a6e9dacc8166ffbf198fce74deaf3e6a535c75ea46319700c7e91c7bd7d53b2d54bebe57973c74c43fcc71aefd2f440a539f6a96b52c9a5d3d5f8d58864a4b32c5fefe32e31be9e1d6f37cedb3db9dcf5212f6a4347b9f095588f3f51d744e54888249bcc2813fe324e964838ef3417652d45aeba89c958aa67fa263cbeb6aff32db45dd9317eeef9a4513c43f74a19b9abdfca463c7105dc5d11ef6b7d004af7358ceea84ab92579eaa63c91fd08350028859ba348dd2b772ec9029ae2595a49679f303ebdb9ac69187a3f9752193d88c8e28bf6838024607b42df55a6de8d27a947d48a8572f99eb45d253c1c4213794b503cb73026714e2eacc599368d780e75b8a2ea5bbccee5abead13dedd732ba5a394ce290647fd15e2dd339e442b9c6eda7abab1ec8c727db62bda68f73505d775b59ac76d345ffc4d78f388ba511666eadfb22c0d8691c033cc6963253925dfd6495832113941b961bf8392d15078fa687db22d2cbb67918071ac7d0feade4c7cac8f852159d04541d82cae3ffe6606ecede8bbc175b5e6d2c3ca2fce6f071aa484c8f0efc3c75c1fb4e0f4dbd7623ff8900b6fd6f30c6f1351303089f3e011bfed2059c0d92e6b11b2bf6c1e5205c2647cc6acc53acf17474a973c3b9af58470c047e0fb35b82ef78d08ba64b31be8734c6b70fd7083437def609e6eda305dab7817e32f547cc60d2387918855f3329503e722b226707c8184df194260e28a4c92906167b1d4a68db4df60c8e53cdbd13dfb41f8842c2e185f081950a6ac68478cd9d9a2e650cc1d023c230b64ec7eb7bfcdedaa3c08e41e4c93ecda6fc3ffed8992e07d49e87f216539070e4fe00ebaa2cbbe31d152d087d69e33d13ca4d57052c4e4c52c8351adfb4982b93d3f9d0bd443ccfae5999db6f0d55bf24a52067182a214561eb214742a49147b00340618d768e3d9d199bbd80c67bc91ab2975369dc5860fdf489562096fa57edd7b625b7cb0a198a4fd17d09441b2ae94a5da1d4f2463414fe14fb9ab1583a73bcdd1c94e545fcfebd0fd42bccc7d5bff4dd9e518b032805a35556173a730a21d4d892b96fcc27687f32729d6435815d9587c8b4f3874f569995c0dab3205aee7f65b7d507e6bfc2e425c49862901f4a071cb"], 0x6c}}, 0x400c004) r0 = socket(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r1, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r3}}, 0x20}}, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) r5 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r4, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r6}}, 0x20}}, 0x0) ioctl$sock_ipv4_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f00000000c0)={'syztnl2\x00', &(0x7f0000000040)={'syztnl1\x00', 0x0, 0x10, 0x7800, 0xffffffff, 0x4, {{0x18, 0x4, 0x2, 0x0, 0x60, 0x67, 0x0, 0x6, 0x29, 0x0, @dev={0xac, 0x14, 0x14, 0xa}, @dev={0xac, 0x14, 0x14, 0x23}, {[@generic={0x86, 0xc, "093f18c11ec2242b7b52"}, @ra={0x94, 0x4}, @timestamp_addr={0x44, 0x3c, 0x77, 0x1, 0x8, [{@private=0xa010100}, {@empty, 0x5fd3b561}, {@dev={0xac, 0x14, 0x14, 0x44}, 0x5}, {@empty, 0x2}, {@loopback, 0x2}, {@dev={0xac, 0x14, 0x14, 0x19}, 0x40}, {@empty}]}]}}}}}) sendmsg$nl_route(r1, &(0x7f0000000200)={&(0x7f0000000000), 0xc, &(0x7f0000000100)={&(0x7f0000000180)=@newneigh={0x58, 0x1c, 0x2, 0x70bd2d, 0x25dfdbfb, {0xa, 0x0, 0x0, r6, 0x10, 0x4, 0x1}, [@NDA_DST_IPV6={0x14, 0x1, @remote}, @NDA_IFINDEX={0x8, 0x8, r7}, @NDA_VLAN={0x6, 0x5, 0x1}, @NDA_LLADDR={0xa, 0x2, @broadcast}, @NDA_DST_MAC={0xa, 0x1, @remote}]}, 0x58}, 0x1, 0x0, 0x0, 0x80}, 0x4004) sendmmsg$alg(r0, &(0x7f0000000140), 0x4924b68, 0x0) 19:46:44 executing program 3: syz_emit_ethernet(0x2e4, 
&(0x7f0000000200)={@broadcast, @empty, @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x2d6, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x880b, 0x221, 0x0, [], "3488d1f933d839a506b26f6661b6a9c77b71e07542794b9c0c12c3344783a3c0de3e30ac38c91e3a8731cddafa4cd27ea453ad5d096d6f0b06869f304dc2aa43272f6868e681cc3f04bfb4895c6beca76ba540d848562bba22e4a19d6c55b082dbdc6547c913af9de233ed818b854815bf742480fa582ad908344254b0dedaa23a8bd0830f999d3e12c2b8a7f8f03a102d2b357fb6f26e4e1191c4fc249a2705c154e7d4a9f7def6e29788cc56ce452f50565e07a90b02709cc7839ee413e67933cb8aa6adfd4f916c11d4aaa6cea7a907d88a55ed4aed0ba5c2f3e437197eccc11a4662f5f3fc8d77b2bc19a7e2e4803ebf1c730868f4286bdb1f21b7cb2dc267712e6f389b06b2bd89910404ca1e8026bdf416f7c5d22770324406002ff1bbe007e909aca81097f24bff098105a67e2f7f3ca72ddcb84e5df1d89f084f9886f98b7eebf8c1bc59a0b454cd4c2a8e90daddbc277f26bd47302b1f2718471113ba1130fd582ac43ed60d7f6aa387f563b35f2f8c57ab6c88d398effbb5b2b6c720916b3e7d3ffb4978da85b55e468ae4060f160b6e2f8416a21c7984149951a00ee5d6039a24e32536a3e3e914ebb439946279f7d370ad5c28cc1cfa9436f0644fe5acb15a3acd8b55d4ec0eb7af6681a1ba6133d76e4703d779ebb03f7f87c17a4981fce3194649af61139d4a5e189e2110c522371e42697095f65e2ba304075f758be2bc58f7bad4d7f5e48b7fe26c4e22689ecfcba6069d6c5c9b686863e1cc"}, {}, {}, {}, {}, {0x8, 0x6558, 0x3, "c199c0ca2e3f11671ef96fcc0fba6530c4fb25aef2158d8cdf0b7c8ba38960cab43ac9d3e454bc64852598a1fef63e2c45e37801ce6007a1cef34f84e3509447c97b4b29888138d1432601edde678403e87711d7900fd899342b46245d"}}}}}}, 0x0) [ 2610.825571][T26175] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:46:45 executing program 3: syz_emit_ethernet(0x2e4, &(0x7f0000000200)={@broadcast, @empty, @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x2d6, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x880b, 0x221, 0x0, [], "3488d1f933d839a506b26f6661b6a9c77b71e07542794b9c0c12c3344783a3c0de3e30ac38c91e3a8731cddafa4cd27ea453ad5d096d6f0b06869f304dc2aa43272f6868e681cc3f04bfb4895c6beca76ba540d848562bba22e4a19d6c55b082dbdc6547c913af9de233ed818b854815bf742480fa582ad908344254b0dedaa23a8bd0830f999d3e12c2b8a7f8f03a102d2b357fb6f26e4e1191c4fc249a2705c154e7d4a9f7def6e29788cc56ce452f50565e07a90b02709cc7839ee413e67933cb8aa6adfd4f916c11d4aaa6cea7a907d88a55ed4aed0ba5c2f3e437197eccc11a4662f5f3fc8d77b2bc19a7e2e4803ebf1c730868f4286bdb1f21b7cb2dc267712e6f389b06b2bd89910404ca1e8026bdf416f7c5d22770324406002ff1bbe007e909aca81097f24bff098105a67e2f7f3ca72ddcb84e5df1d89f084f9886f98b7eebf8c1bc59a0b454cd4c2a8e90daddbc277f26bd47302b1f2718471113ba1130fd582ac43ed60d7f6aa387f563b35f2f8c57ab6c88d398effbb5b2b6c720916b3e7d3ffb4978da85b55e468ae4060f160b6e2f8416a21c7984149951a00ee5d6039a24e32536a3e3e914ebb439946279f7d370ad5c28cc1cfa9436f0644fe5acb15a3acd8b55d4ec0eb7af6681a1ba6133d76e4703d779ebb03f7f87c17a4981fce3194649af61139d4a5e189e2110c522371e42697095f65e2ba304075f758be2bc58f7bad4d7f5e48b7fe26c4e22689ecfcba6069d6c5c9b686863e1cc"}, {}, {}, {}, {}, {0x8, 0x6558, 0x3, "c199c0ca2e3f11671ef96fcc0fba6530c4fb25aef2158d8cdf0b7c8ba38960cab43ac9d3e454bc64852598a1fef63e2c45e37801ce6007a1cef34f84e3509447c97b4b29888138d1432601edde678403e87711d7900fd899342b46245d"}}}}}}, 0x0) 19:46:45 executing program 0: bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x24, &(0x7f0000000180)='/proc/sys/net/ipv4/vs/sync_qlen_max\x00'}, 0x30) r0 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f0000000500)='freezer.self_freezing\x00', 0x275a, 0x0) (async) r1 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$NL80211_CMD_TRIGGER_SCAN(r1, &(0x7f0000000100)={&(0x7f0000000040), 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x40, 0x0, 0x20, 0x70bd25, 0x25dfdbfc, {{}, {@void, @val={0xc, 0x99, {0x5, 0x33}}}}, [@NL80211_ATTR_SCHED_SCAN_MULTI={0x4}, @NL80211_ATTR_BSSID={0xa, 0xf5, @random="03dad14e7f0a"}, @NL80211_ATTR_SCHED_SCAN_MATCH={0x10, 0x84, 0x0, 0x1, [@NL80211_SCHED_SCAN_MATCH_ATTR_SSID={0xa, 0x1, @default_ap_ssid}]}]}, 0x40}}, 0x4000) (async) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0xfea7) (async) mmap(&(0x7f0000000000/0x3000)=nil, 0x3000, 0x1, 0x10012, r0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r2, 0x0, 0x0) [ 2611.001521][T26175] bond1216: entered promiscuous mode [ 2611.063618][T26175] 8021q: adding VLAN 0 to HW filter on device bond1216 19:46:45 executing program 0: bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x24, &(0x7f0000000180)='/proc/sys/net/ipv4/vs/sync_qlen_max\x00'}, 0x30) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000500)='freezer.self_freezing\x00', 0x275a, 0x0) (async) r1 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$NL80211_CMD_TRIGGER_SCAN(r1, &(0x7f0000000100)={&(0x7f0000000040), 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x40, 0x0, 0x20, 0x70bd25, 0x25dfdbfc, {{}, {@void, @val={0xc, 0x99, {0x5, 0x33}}}}, [@NL80211_ATTR_SCHED_SCAN_MULTI={0x4}, @NL80211_ATTR_BSSID={0xa, 0xf5, @random="03dad14e7f0a"}, @NL80211_ATTR_SCHED_SCAN_MATCH={0x10, 0x84, 0x0, 0x1, [@NL80211_SCHED_SCAN_MATCH_ATTR_SSID={0xa, 0x1, @default_ap_ssid}]}]}, 0x40}}, 0x4000) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0xfea7) (async, rerun: 64) mmap(&(0x7f0000000000/0x3000)=nil, 0x3000, 0x1, 0x10012, r0, 0x0) (async, rerun: 64) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r2, 0x0, 0x0) [ 2611.112969][T26182] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:46:45 executing program 3: ioctl$sock_bt_bnep_BNEPCONNDEL(0xffffffffffffffff, 0x400442c9, &(0x7f0000000000)={0x3, @multicast}) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0xfd, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) r0 = syz_init_net_socket$llc(0x1a, 0x3, 0x0) getsockopt$sock_cred(r0, 0x1, 0x11, &(0x7f0000000040), &(0x7f0000000080)=0xc) 19:46:45 executing program 3: ioctl$sock_bt_bnep_BNEPCONNDEL(0xffffffffffffffff, 0x400442c9, &(0x7f0000000000)={0x3, @multicast}) (async, rerun: 64) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0xfd, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async, rerun: 64) r0 = syz_init_net_socket$llc(0x1a, 0x3, 0x0) getsockopt$sock_cred(r0, 0x1, 0x11, &(0x7f0000000040), &(0x7f0000000080)=0xc) [ 2611.357804][T26182] bond1102: entered promiscuous mode [ 2611.375824][T26182] 8021q: adding VLAN 0 to HW filter on device bond1102 19:46:45 executing program 0: bpf$BPF_TASK_FD_QUERY(0x14, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x24, &(0x7f0000000180)='/proc/sys/net/ipv4/vs/sync_qlen_max\x00'}, 0x30) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000500)='freezer.self_freezing\x00', 0x275a, 0x0) r1 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$NL80211_CMD_TRIGGER_SCAN(r1, &(0x7f0000000100)={&(0x7f0000000040), 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x40, 0x0, 0x20, 0x70bd25, 0x25dfdbfc, {{}, {@void, @val={0xc, 0x99, {0x5, 0x33}}}}, [@NL80211_ATTR_SCHED_SCAN_MULTI={0x4}, @NL80211_ATTR_BSSID={0xa, 0xf5, @random="03dad14e7f0a"}, @NL80211_ATTR_SCHED_SCAN_MATCH={0x10, 0x84, 0x0, 0x1, [@NL80211_SCHED_SCAN_MATCH_ATTR_SSID={0xa, 0x1, @default_ap_ssid}]}]}, 0x40}}, 0x4000) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0xfea7) mmap(&(0x7f0000000000/0x3000)=nil, 0x3000, 0x1, 0x10012, r0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r2, 0x0, 0x0) [ 2611.441437][T26181] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:46:45 executing program 3: ioctl$sock_bt_bnep_BNEPCONNDEL(0xffffffffffffffff, 0x400442c9, &(0x7f0000000000)={0x3, @multicast}) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0xfd, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) r0 = syz_init_net_socket$llc(0x1a, 0x3, 0x0) getsockopt$sock_cred(r0, 0x1, 0x11, &(0x7f0000000040), &(0x7f0000000080)=0xc) ioctl$sock_bt_bnep_BNEPCONNDEL(0xffffffffffffffff, 0x400442c9, &(0x7f0000000000)={0x3, @multicast}) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0xfd, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) syz_init_net_socket$llc(0x1a, 0x3, 0x0) (async) getsockopt$sock_cred(r0, 0x1, 0x11, &(0x7f0000000040), &(0x7f0000000080)=0xc) (async) [ 2611.644948][T26181] bond1177: entered promiscuous mode [ 2611.683781][T26181] 8021q: adding VLAN 0 to HW filter on device bond1177 [ 2611.770440][T26184] bond1102: (slave bridge1032): making interface the new active one [ 2611.781441][T26184] bridge1032: entered promiscuous mode [ 2611.797768][T26184] bond1102: (slave bridge1032): Enslaving as an active interface with an up link 19:46:46 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x32030000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2611.895542][T26185] bond1177: (slave bridge1080): making interface the new active one [ 2611.904893][T26185] bridge1080: entered promiscuous mode [ 2611.920223][T26185] bond1177: (slave bridge1080): Enslaving as an active interface with an up link [ 2612.017593][T26183] bond1216: (slave bridge1146): making interface the new active one [ 2612.025890][T26183] bridge1146: entered promiscuous mode [ 2612.038201][T26183] bond1216: (slave bridge1146): Enslaving as an active interface with an up link [ 2612.053803][T26195] __nla_validate_parse: 4310 callbacks suppressed 19:46:46 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x34020000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, 
{0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:46 executing program 0: socketpair$unix(0x1, 0x3, 0x0, &(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) sendmmsg$unix(r1, &(0x7f00000bd000), 0x318, 0x0) ioctl$int_in(r1, 0x5452, &(0x7f0000000000)=0x1) close(r0) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000080)=@bridge_dellink={0x6c, 0x11, 0x704, 0x70bd2b, 0x25dfdbfd, {0x7, 0x0, 0x0, 0x0, 0xc820, 0x40}, [@IFLA_EVENT={0x8, 0x2c, 0x1c42}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_CARRIER_CHANGES={0x8, 0x23, 0x6}, @IFLA_BROADCAST={0xa, 0x2, @remote}, @IFLA_PHYS_PORT_ID={0x20, 0x22, "34ad5ef7ed2eb3116fbc0fdbab0516ce5ae1fed050adf60508e14a3e"}]}, 0x6c}, 0x1, 0x0, 0x0, 0x1}, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = bpf$ITER_CREATE(0x21, &(0x7f00000001c0), 0x8) sendmsg$nl_route_sched(r4, &(0x7f0000000480)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x22582000}, 0xc, &(0x7f0000000440)={&(0x7f0000000280)=@delqdisc={0x1a8, 0x25, 0x4, 0x70bd27, 0x25dfdbfd, {0x0, 0x0, 0x0, 0x0, {0x9, 0x9}, {0x4e58d517c9d9cd, 0xfff1}, {0xffe0, 0xa}}, [@qdisc_kind_options=@q_fq_codel={{0xd}, {0x14, 0x2, [@TCA_FQ_CODEL_TARGET={0x8, 0x1, 0xfffffff7}, @TCA_FQ_CODEL_FLOWS={0x8, 0x5, 0x2}]}}, @TCA_STAB={0x2c, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x5, 0x3, 0x6c, 0x0, 0x0, 0x200, 0x101, 0x3}}, {0xa, 0x2, [0x7, 0x7, 0x3f]}}]}, @TCA_RATE={0x6, 0x5, {0x81, 0x9}}, @TCA_STAB={0x58, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x40, 0x0, 0xc39, 0x1, 0x3, 0x7, 0xffb, 0x7}}, {0x12, 0x2, [0x1, 0x3, 0x2, 0x9, 0x3ff, 0xcc7, 0x20]}}, {{0x1c, 0x1, {0x0, 0x40, 0x2, 0xffffb07f, 0x1, 0x2, 0xffffff00, 0x1}}, {0x6, 0x2, [0x6cf]}}]}, @TCA_EGRESS_BLOCK={0x8, 0xe, 0x9c}, @TCA_STAB={0x78, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x7, 0x99, 0x400, 0x9, 0x2, 0x6, 0x6, 0x1}}, {0x6, 0x2, [0x3]}}, {{0x1c, 0x1, {0x2, 0x18, 0x2, 0x1357c6bf, 0x1, 0x400, 0x8000, 0x2}}, {0x8, 0x2, [0xee4, 0x9]}}, {{0x1c, 0x1, {0x7, 0x80, 0x1ff, 0x3, 0x1, 0xf7, 0xe5b4, 0x6}}, {0x10, 0x2, [0x0, 0x7, 0x1, 0x0, 0x401, 0x80]}}]}, @TCA_EGRESS_BLOCK={0x8, 0xe, 0x3}, @qdisc_kind_options=@q_dsmark={{0xb}, {0x38, 0x2, [@TCA_DSMARK_INDICES={0x6, 0x1, 0x3a}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x8000}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x1}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x8}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0xffff}, @TCA_DSMARK_SET_TC_INDEX={0x4}]}}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0xfa04}]}, 0x1a8}, 0x1, 0x0, 0x0, 0x8000}, 0x811) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendmsg$NL80211_CMD_STOP_AP(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x20, r5, 0x804, 0x70bd28, 0x25dfdbfb, {{}, {@void, @val={0xc, 0x99, {0x9, 0x27}}}}, ["", "", ""]}, 0x20}, 0x1, 0x0, 0x0, 0x20040818}, 0x800) [ 2612.053826][T26195] netlink: 76 bytes leftover after parsing attributes in process `syz-executor.2'. 
19:46:46 executing program 2: sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000700)=ANY=[@ANYBLOB="6c0000001000010400"/20, @ANYRES8, @ANYBLOB="0526060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f1900000600c9d60000000008000a00a8", @ANYRESOCT=0x0, @ANYBLOB="2ccd13158772c364a7cf17d6c911a05a0b353e91f169d98ade4cead5a8fc78af5f2bc3200eeb31834404066dc3251e18e45a0ffa71d2ef3ce9cd20f72e625a59a9ba302b0b086782e0096356df1df5416cb10f19610f5a81362b100706e14f758a31dd473378c8a20cb3c6c191e0700d404ecfca4ce3773cb92f16c3841c479bee5effd25e37e3429c02ab2e38eee7b0fe16029e902cbd435d2ac45ed8d6207edbf5062bc7945ec7a1221176f4aaae644c0664b57f3b458c31e2b81dc1ab9beb7c24ebe767e04e8a6c5d2b888dc9f1c5c0e2c8e548baf275e0f53cb330cb9ca107215c9857d56cbe9f0b08d4b3f50e4993dd43afa2fd9e01b7e324ce9c4cfdbb62421108a9f7c7ddc4c3241a0f1698f4d0a114c305077fe20b01e1bcb23132a23d2cbdd8ef080addc94984a08b3f1f2de3e4a1efc186c8c04116443d3301d7023465797a1fde6f9326e9b0c4ecff4b4ee58919c5900c2d193f0f3fe964a8e467d648506ba8b9d79c7751c2c61bffb9a503abe13aa37c3f6de2879c05999a24c226f5c55bcd7631c92a041fc8e60db47529dd8978f77dc94c2cafa375d1cb78803035b9b78b97d3a83d417803d6fd7a2e5e5bdcff234a044980ce619b7804aa15a95ad0c4f5eaf7aed0d55907ffb0502d1424d485eac02477d917608001146ad62ead28a43d56f7f08a71a91451821d4533ea14c0beb47d9466f2ffcc4a5b84dab695a4a03d79b8736217419fc4b6912952e7b14bd8946f5da70a466166ef0afd8a21db37c9aaddfadc7136efe216e44a23952b39fedc796f2bb06c6048ed133d6887df953a96abc3ead5997ed442a0f9bed1f3db599c1fcf02d88bd16cfb85771bf99ea0375471d5e957e508fb0502ef9c14da41d48144b6e961266bc394af18d0c7cbf025d5008472e8f4602bb8b2081d35ef362ebdb21a964afb4d2a40675d30590994805f00aa5690068393c70ce532404e1ac7e61909d44e487ee4267205603ca20f78ebec10fc468ff27ce912eb6479241ca76d26b1dde798de23464b3f8c13f732bcce03c07a8546986ee9c468ec314cb9d0fa9a541b4fcf81eb7200f83a3a16db551d7c1eb77768f477f8bfd0e11a4a24dc8205ea561a34034a60a10d4e3dcf192b271edbbbd292d32a57b4cadf520623f1bb20bdef192467bb7eb8eb6c802d830e6a9b187a338420555636a61bac368193d99862a60ea336cbd1fd023ccfd99dd22079e49226f39be804ef551930bca3a7b7367dd6ca1398392ea7401b5aedc94adf5f0dff6d02b2abe2c293fc72b68d76ee95429ab626286102bae76d96b00e149d50d804460ee74d768e4ce6491f9aa0ae783fb9957a6a2b772ee22e82abf6d49e82905a9714647e9dcb75b04176468bb758cf97994c298ca0ab38e029d4f192d93650b5fdaf7fecd8bc741d158ceeb0861b306f1efa1864e769f59ea5e9d851a4a3d8959453a67e65a15f54173939255966fced1812e0f8220e692ab1de400ed331d6c0b02234b39b8eb53123c3b9da4f9ff8f31b5393e8942ee37808f6437ae8acab2e992e76e9d8c4ecd68040ec378bff010a7900cf7bda31ed3bbf4c74fb4c4cfb79e8ad68ec5cfb50eaee1bb232427d20eed887598af2027a76f467854e84b113ee0dbaf6af1d79cad64794eba6c34e8bbd6cce4aa0be202b1adb08f02421b180173435fa6a02fc79b8617109ab346c52b18f187be3d2eeaa4585e957ee118361979e9d756f2e67cbf708c47f7819556bf18103d8f92eb69c3d7c329c1c8c3bcab78cf977e23ea8c86ecf5f03af45f3cd9bb14bdac05336edce26aa0d4eb13071c076a18ddf71cc9c35ecc0b0be6aad7826de1337ccf16b019cf726b124126963437c0a8eca0a0d72e24fce86ae34840fbd588f467affda1bc929568088de10197d3ab3c17cadfc1a15c4c1c73d153dc7fafe1a19e9ead1fe26ec5c9509b3aab679736d78c83baa130786e328f70036a316aeebf8c88e6a4a8eb90746da501e38f281acb2b13fbce1cc7c1858406f42a12c137476756709e54a03caa25642dbc8be428c23a1d7643cd7515d9cc9bc84588fba6b00cc6805384992ff442a2419ff79a42a0080d32707da0128910e248e5a2caa36d6a4176f8325e5d20f5fd82754d9f333aebbbbc1b20ff409a88ab2ced681deb45b7a9c25165e1ede74a0ff1ef814a99898504e9f1d36edc14abbc7f1e9063ae1cd050e54fa1fe0e3954f40fa070f3b4391d597661c43133db2687ce101fd90a6972e7542ed9bb641615fe5
853a0c6dd87b1e3773f73156180706a30e8038785218a9e6a29ac0ff9c136be1edd35c4da4997f8a4925165f2581ef70b097cf8b53659ad6a4d5c5e6c7f2fc28d7f65338e29f9357439c6e5173e2967948125445149546df2334e9c3c09340ed1a713b5b895e4e3953e0668bf0022e5643e642bf1207fed3c49ffb24c558e816b8fe413c22575dc8b8ef2664453187a74408d700feb5f2824d9efc41877716c0387d67e7a0e6f41de145cab9d178d2ae63e5c0a73ed46cdbe6db8e17e52dca3c32e50bb7d91f5d7615de78d02889a90d52cf872c257c6b3ef9e5a22044af5c3e298888a4b050fb6cd94b03e4605cafa5abec8813133dc08b33a49875af593ea447998df60888d6d73eb0bdfbbaf0a753b220eb584798cf5424411edb6366b83615cef6da0631897583d88781b05a7d185914b26f85718aea8be61c3c114a9458d4f414d91a1d70a7498624946d75278137954fe37e5d9e6e730dc5c4d1fcc0a793c64c0807914243fa39f8ab8afea87db514705674724820387accc221fab6d90dbd59b037da43a8e6cc26df4cbc591fbaceac2d34ebe5c81247a92eab1f82a5e0378984b2b0f5af63191b149ac8e658626590dc5d154dfe032c48955c27baae7622284dcbe0abd117be2cebb3afea4e94c19f76b8a58762ae5e71771452aec20dcb74b22da54f9346d3e7cbb253d4eb985532bf7b55686aefff30d5f3c035d6f984dfbcc1cef8813d4298a21d3a0c0d7ac87742528225fcee9ccf9c705af0c54bc7ca84f098e76f715b2fe0261f2da9201b09d7a70b40dd70a2b96b16009a7820694ff5015a08f106cff95aef79c7755ad184bb714993b295e53d65f5cd5aacbb4302285f9c941266a463ddcbd8a0ce27cb3e9e20b078e5debb3895bef6df9651116214eff3fd92454ed9f243106a6670dc48ad16d2b1e77dacb2edd149e430160e068fccc843296481981c104298cab7347d9e19f99018e44ddcdba7dab500c99dc0193aac4ea1de69b839988b7efe94b9e23a55c9b3707113e2f2449cc6a24dd6fec2cd156e4052d5c3b6a99bfaf2cae4cc736820279c3142156e42c8140c68003dd3a570724aafbab9014dc56e98bbba2b1e902eaadf5ccb5a7ed0e8763ef2857f5161b0f83bda666cf72619684295d3f3ef005bd6e054137c738f825e5631a8c0ba28a2771a29738719c7c5a54664a87d6233c1e53c969f44a9b42e1e0e0ee7c88c434720d16353e1f7569c200dcca69c49c02ef0b8548580c743830fb59d1da544d0a926fbbd663ce8ef3c0f39aa0380e7867ade8878628beb3cd7693f5a85b663cb8679fa6ea2402062f9ca1cc565f184ec6a37b6372a456a19d0e92b91033d852f5b3c8a51dcf867d950586a1539f3358c82cc871bab2bd9334e648bbbed69191498719a3c97f32faf9dbb8071f90333aeb3dd8187fb036ed62b41db2a53687f235dcf8bb64c57af3628207772f8dc9ae9f26e7676f85016290873e5bbe308f7fe3cffc29e89b70e2dba370bff92fb38f6e913a79a4cde814ad4db750e0986219550ee1db33bde2ec7636df7ac64673b0f900d67195a497adafd9c9fca44653f312b42237f2f4162e3c03b98d7862a1a0f69430e6d33ef515246626cd4972a8ce1a79d62148365a6e7712d2fd1a7f54c4f1087969e76d152e88ab42fa2f9f982428f6ce45871bd7a65034dc310df4027ac5e390fe0ac0836edbda58560c88d5121807d9d5b9a071142b99f10f7cfa25d35a7411b458972b75eb9e6aa7c784ee328891412d5f59d6eea9eb4d567049b7febc680ef47ef84c57894ced18e7789cffbf1dd4202201813b06ff6f80fa7f18278b375b3d89327fe8aeb29ea143212164738934ddf2f31e8a1a806089e9d1e5dc0b2ac70d5817cfcabf35d84e1fe55c94d6529657c5d03871d44c7a65ada7cee41bded205fc9a921f6db70d6f7ea380dab82dabd22d25c963f48955859f662f56ddb0b9fefdaad114a5aff2d1d57d78bcb07a41e75da2a156abb89f740c8872ae8b887f27590f7f42945c85fd891611defc72ab00a259099188dd019d8205c2c4327cb03839c1a72a883125babd96c846fcd2f9c62c1a45ed20eca722bfc124e7f1239d122dc624b94516e3c2e9c1472075ca3ff81817161f36df290adfd60d45e07128d7460e0da4653dd0f5a1d67660565c1ee91870fe7be42b0943780fc753a32a8146c478b31e49a9a87fc4d2d21deb53ec690d1fb014ee3563e2408679f9913bfaff80a0965370633c2f608b733f5314ce907c9b02b9a6673ce7ee431037d2be6b16180ccd9b25f1ff1f9c35a9f4c1149a36cb679e04a1752e2cfd48521ff1af851b572e8869eb32d513b61ed61326d9daa29a6e9dacc8166ffbf198fce74deaf3e6a535c75ea46319700c7e91c7bd7d53b2d54bebe57973c74c43fcc71aefd2f440a539f6a96b52c9a5d3d5f8d58864a4b32c5fefe32e31be9e1d6f37cedb3db9dcf5212f6a4347b9f095588f3f51d744e54888249bcc2813fe32
4e964838ef3417652d45aeba89c958aa67fa263cbeb6aff32db45dd9317eeef9a4513c43f74a19b9abdfca463c7105dc5d11ef6b7d004af7358ceea84ab92579eaa63c91fd08350028859ba348dd2b772ec9029ae2595a49679f303ebdb9ac69187a3f9752193d88c8e28bf6838024607b42df55a6de8d27a947d48a8572f99eb45d253c1c4213794b503cb73026714e2eacc599368d780e75b8a2ea5bbccee5abead13dedd732ba5a394ce290647fd15e2dd339e442b9c6eda7abab1ec8c727db62bda68f73505d775b59ac76d345ffc4d78f388ba511666eadfb22c0d8691c033cc6963253925dfd6495832113941b961bf8392d15078fa687db22d2cbb67918071ac7d0feade4c7cac8f852159d04541d82cae3ffe6606ecede8bbc175b5e6d2c3ca2fce6f071aa484c8f0efc3c75c1fb4e0f4dbd7623ff8900b6fd6f30c6f1351303089f3e011bfed2059c0d92e6b11b2bf6c1e5205c2647cc6acc53acf17474a973c3b9af58470c047e0fb35b82ef78d08ba64b31be8734c6b70fd7083437def609e6eda305dab7817e32f547cc60d2387918855f3329503e722b226707c8184df194260e28a4c92906167b1d4a68db4df60c8e53cdbd13dfb41f8842c2e185f081950a6ac68478cd9d9a2e650cc1d023c230b64ec7eb7bfcdedaa3c08e41e4c93ecda6fc3ffed8992e07d49e87f216539070e4fe00ebaa2cbbe31d152d087d69e33d13ca4d57052c4e4c52c8351adfb4982b93d3f9d0bd443ccfae5999db6f0d55bf24a52067182a214561eb214742a49147b00340618d768e3d9d199bbd80c67bc91ab2975369dc5860fdf489562096fa57edd7b625b7cb0a198a4fd17d09441b2ae94a5da1d4f2463414fe14fb9ab1583a73bcdd1c94e545fcfebd0fd42bccc7d5bff4dd9e518b032805a35556173a730a21d4d892b96fcc27687f32729d6435815d9587c8b4f3874f569995c0dab3205aee7f65b7d507e6bfc2e425c49862901f4a071cb"], 0x6c}}, 0x400c004) r0 = socket(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r1, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r3}}, 0x20}}, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) r5 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r4, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r6}}, 0x20}}, 0x0) ioctl$sock_ipv4_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f00000000c0)={'syztnl2\x00', &(0x7f0000000040)={'syztnl1\x00', 0x0, 0x10, 0x7800, 0xffffffff, 0x4, {{0x18, 0x4, 0x2, 0x0, 0x60, 0x67, 0x0, 0x6, 0x29, 0x0, @dev={0xac, 0x14, 0x14, 0xa}, @dev={0xac, 0x14, 0x14, 0x23}, {[@generic={0x86, 0xc, "093f18c11ec2242b7b52"}, @ra={0x94, 0x4}, @timestamp_addr={0x44, 0x3c, 0x77, 0x1, 0x8, [{@private=0xa010100}, {@empty, 0x5fd3b561}, {@dev={0xac, 0x14, 0x14, 0x44}, 0x5}, {@empty, 0x2}, {@loopback, 0x2}, {@dev={0xac, 0x14, 0x14, 0x19}, 0x40}, {@empty}]}]}}}}}) sendmsg$nl_route(r1, &(0x7f0000000200)={&(0x7f0000000000), 0xc, &(0x7f0000000100)={&(0x7f0000000180)=@newneigh={0x58, 0x1c, 0x2, 0x70bd2d, 0x25dfdbfb, {0xa, 0x0, 0x0, r6, 0x10, 0x4, 0x1}, [@NDA_DST_IPV6={0x14, 0x1, @remote}, @NDA_IFINDEX={0x8, 0x8, r7}, @NDA_VLAN={0x6, 0x5, 0x1}, @NDA_LLADDR={0xa, 0x2, @broadcast}, @NDA_DST_MAC={0xa, 0x1, @remote}]}, 0x58}, 0x1, 0x0, 0x0, 0x80}, 0x4004) sendmmsg$alg(r0, &(0x7f0000000140), 0x4924b68, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000700)=ANY=[@ANYBLOB="6c0000001000010400"/20, @ANYRES8, @ANYBLOB="0526060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f1900000600c9d60000000008000a00a8", @ANYRESOCT=0x0, 
@ANYBLOB="2ccd13158772c364a7cf17d6c911a05a0b353e91f169d98ade4cead5a8fc78af5f2bc3200eeb31834404066dc3251e18e45a0ffa71d2ef3ce9cd20f72e625a59a9ba302b0b086782e0096356df1df5416cb10f19610f5a81362b100706e14f758a31dd473378c8a20cb3c6c191e0700d404ecfca4ce3773cb92f16c3841c479bee5effd25e37e3429c02ab2e38eee7b0fe16029e902cbd435d2ac45ed8d6207edbf5062bc7945ec7a1221176f4aaae644c0664b57f3b458c31e2b81dc1ab9beb7c24ebe767e04e8a6c5d2b888dc9f1c5c0e2c8e548baf275e0f53cb330cb9ca107215c9857d56cbe9f0b08d4b3f50e4993dd43afa2fd9e01b7e324ce9c4cfdbb62421108a9f7c7ddc4c3241a0f1698f4d0a114c305077fe20b01e1bcb23132a23d2cbdd8ef080addc94984a08b3f1f2de3e4a1efc186c8c04116443d3301d7023465797a1fde6f9326e9b0c4ecff4b4ee58919c5900c2d193f0f3fe964a8e467d648506ba8b9d79c7751c2c61bffb9a503abe13aa37c3f6de2879c05999a24c226f5c55bcd7631c92a041fc8e60db47529dd8978f77dc94c2cafa375d1cb78803035b9b78b97d3a83d417803d6fd7a2e5e5bdcff234a044980ce619b7804aa15a95ad0c4f5eaf7aed0d55907ffb0502d1424d485eac02477d917608001146ad62ead28a43d56f7f08a71a91451821d4533ea14c0beb47d9466f2ffcc4a5b84dab695a4a03d79b8736217419fc4b6912952e7b14bd8946f5da70a466166ef0afd8a21db37c9aaddfadc7136efe216e44a23952b39fedc796f2bb06c6048ed133d6887df953a96abc3ead5997ed442a0f9bed1f3db599c1fcf02d88bd16cfb85771bf99ea0375471d5e957e508fb0502ef9c14da41d48144b6e961266bc394af18d0c7cbf025d5008472e8f4602bb8b2081d35ef362ebdb21a964afb4d2a40675d30590994805f00aa5690068393c70ce532404e1ac7e61909d44e487ee4267205603ca20f78ebec10fc468ff27ce912eb6479241ca76d26b1dde798de23464b3f8c13f732bcce03c07a8546986ee9c468ec314cb9d0fa9a541b4fcf81eb7200f83a3a16db551d7c1eb77768f477f8bfd0e11a4a24dc8205ea561a34034a60a10d4e3dcf192b271edbbbd292d32a57b4cadf520623f1bb20bdef192467bb7eb8eb6c802d830e6a9b187a338420555636a61bac368193d99862a60ea336cbd1fd023ccfd99dd22079e49226f39be804ef551930bca3a7b7367dd6ca1398392ea7401b5aedc94adf5f0dff6d02b2abe2c293fc72b68d76ee95429ab626286102bae76d96b00e149d50d804460ee74d768e4ce6491f9aa0ae783fb9957a6a2b772ee22e82abf6d49e82905a9714647e9dcb75b04176468bb758cf97994c298ca0ab38e029d4f192d93650b5fdaf7fecd8bc741d158ceeb0861b306f1efa1864e769f59ea5e9d851a4a3d8959453a67e65a15f54173939255966fced1812e0f8220e692ab1de400ed331d6c0b02234b39b8eb53123c3b9da4f9ff8f31b5393e8942ee37808f6437ae8acab2e992e76e9d8c4ecd68040ec378bff010a7900cf7bda31ed3bbf4c74fb4c4cfb79e8ad68ec5cfb50eaee1bb232427d20eed887598af2027a76f467854e84b113ee0dbaf6af1d79cad64794eba6c34e8bbd6cce4aa0be202b1adb08f02421b180173435fa6a02fc79b8617109ab346c52b18f187be3d2eeaa4585e957ee118361979e9d756f2e67cbf708c47f7819556bf18103d8f92eb69c3d7c329c1c8c3bcab78cf977e23ea8c86ecf5f03af45f3cd9bb14bdac05336edce26aa0d4eb13071c076a18ddf71cc9c35ecc0b0be6aad7826de1337ccf16b019cf726b124126963437c0a8eca0a0d72e24fce86ae34840fbd588f467affda1bc929568088de10197d3ab3c17cadfc1a15c4c1c73d153dc7fafe1a19e9ead1fe26ec5c9509b3aab679736d78c83baa130786e328f70036a316aeebf8c88e6a4a8eb90746da501e38f281acb2b13fbce1cc7c1858406f42a12c137476756709e54a03caa25642dbc8be428c23a1d7643cd7515d9cc9bc84588fba6b00cc6805384992ff442a2419ff79a42a0080d32707da0128910e248e5a2caa36d6a4176f8325e5d20f5fd82754d9f333aebbbbc1b20ff409a88ab2ced681deb45b7a9c25165e1ede74a0ff1ef814a99898504e9f1d36edc14abbc7f1e9063ae1cd050e54fa1fe0e3954f40fa070f3b4391d597661c43133db2687ce101fd90a6972e7542ed9bb641615fe5853a0c6dd87b1e3773f73156180706a30e8038785218a9e6a29ac0ff9c136be1edd35c4da4997f8a4925165f2581ef70b097cf8b53659ad6a4d5c5e6c7f2fc28d7f65338e29f9357439c6e5173e2967948125445149546df2334e9c3c09340ed1a713b5b895e4e3953e0668bf0022e5643e642bf1207fed3c49ffb24c558e816b8fe413c22575dc8b8ef2664453187a74408d700feb5f2824d9efc41877716c0387d67e7a0e6f
41de145cab9d178d2ae63e5c0a73ed46cdbe6db8e17e52dca3c32e50bb7d91f5d7615de78d02889a90d52cf872c257c6b3ef9e5a22044af5c3e298888a4b050fb6cd94b03e4605cafa5abec8813133dc08b33a49875af593ea447998df60888d6d73eb0bdfbbaf0a753b220eb584798cf5424411edb6366b83615cef6da0631897583d88781b05a7d185914b26f85718aea8be61c3c114a9458d4f414d91a1d70a7498624946d75278137954fe37e5d9e6e730dc5c4d1fcc0a793c64c0807914243fa39f8ab8afea87db514705674724820387accc221fab6d90dbd59b037da43a8e6cc26df4cbc591fbaceac2d34ebe5c81247a92eab1f82a5e0378984b2b0f5af63191b149ac8e658626590dc5d154dfe032c48955c27baae7622284dcbe0abd117be2cebb3afea4e94c19f76b8a58762ae5e71771452aec20dcb74b22da54f9346d3e7cbb253d4eb985532bf7b55686aefff30d5f3c035d6f984dfbcc1cef8813d4298a21d3a0c0d7ac87742528225fcee9ccf9c705af0c54bc7ca84f098e76f715b2fe0261f2da9201b09d7a70b40dd70a2b96b16009a7820694ff5015a08f106cff95aef79c7755ad184bb714993b295e53d65f5cd5aacbb4302285f9c941266a463ddcbd8a0ce27cb3e9e20b078e5debb3895bef6df9651116214eff3fd92454ed9f243106a6670dc48ad16d2b1e77dacb2edd149e430160e068fccc843296481981c104298cab7347d9e19f99018e44ddcdba7dab500c99dc0193aac4ea1de69b839988b7efe94b9e23a55c9b3707113e2f2449cc6a24dd6fec2cd156e4052d5c3b6a99bfaf2cae4cc736820279c3142156e42c8140c68003dd3a570724aafbab9014dc56e98bbba2b1e902eaadf5ccb5a7ed0e8763ef2857f5161b0f83bda666cf72619684295d3f3ef005bd6e054137c738f825e5631a8c0ba28a2771a29738719c7c5a54664a87d6233c1e53c969f44a9b42e1e0e0ee7c88c434720d16353e1f7569c200dcca69c49c02ef0b8548580c743830fb59d1da544d0a926fbbd663ce8ef3c0f39aa0380e7867ade8878628beb3cd7693f5a85b663cb8679fa6ea2402062f9ca1cc565f184ec6a37b6372a456a19d0e92b91033d852f5b3c8a51dcf867d950586a1539f3358c82cc871bab2bd9334e648bbbed69191498719a3c97f32faf9dbb8071f90333aeb3dd8187fb036ed62b41db2a53687f235dcf8bb64c57af3628207772f8dc9ae9f26e7676f85016290873e5bbe308f7fe3cffc29e89b70e2dba370bff92fb38f6e913a79a4cde814ad4db750e0986219550ee1db33bde2ec7636df7ac64673b0f900d67195a497adafd9c9fca44653f312b42237f2f4162e3c03b98d7862a1a0f69430e6d33ef515246626cd4972a8ce1a79d62148365a6e7712d2fd1a7f54c4f1087969e76d152e88ab42fa2f9f982428f6ce45871bd7a65034dc310df4027ac5e390fe0ac0836edbda58560c88d5121807d9d5b9a071142b99f10f7cfa25d35a7411b458972b75eb9e6aa7c784ee328891412d5f59d6eea9eb4d567049b7febc680ef47ef84c57894ced18e7789cffbf1dd4202201813b06ff6f80fa7f18278b375b3d89327fe8aeb29ea143212164738934ddf2f31e8a1a806089e9d1e5dc0b2ac70d5817cfcabf35d84e1fe55c94d6529657c5d03871d44c7a65ada7cee41bded205fc9a921f6db70d6f7ea380dab82dabd22d25c963f48955859f662f56ddb0b9fefdaad114a5aff2d1d57d78bcb07a41e75da2a156abb89f740c8872ae8b887f27590f7f42945c85fd891611defc72ab00a259099188dd019d8205c2c4327cb03839c1a72a883125babd96c846fcd2f9c62c1a45ed20eca722bfc124e7f1239d122dc624b94516e3c2e9c1472075ca3ff81817161f36df290adfd60d45e07128d7460e0da4653dd0f5a1d67660565c1ee91870fe7be42b0943780fc753a32a8146c478b31e49a9a87fc4d2d21deb53ec690d1fb014ee3563e2408679f9913bfaff80a0965370633c2f608b733f5314ce907c9b02b9a6673ce7ee431037d2be6b16180ccd9b25f1ff1f9c35a9f4c1149a36cb679e04a1752e2cfd48521ff1af851b572e8869eb32d513b61ed61326d9daa29a6e9dacc8166ffbf198fce74deaf3e6a535c75ea46319700c7e91c7bd7d53b2d54bebe57973c74c43fcc71aefd2f440a539f6a96b52c9a5d3d5f8d58864a4b32c5fefe32e31be9e1d6f37cedb3db9dcf5212f6a4347b9f095588f3f51d744e54888249bcc2813fe324e964838ef3417652d45aeba89c958aa67fa263cbeb6aff32db45dd9317eeef9a4513c43f74a19b9abdfca463c7105dc5d11ef6b7d004af7358ceea84ab92579eaa63c91fd08350028859ba348dd2b772ec9029ae2595a49679f303ebdb9ac69187a3f9752193d88c8e28bf6838024607b42df55a6de8d27a947d48a8572f99eb45d253c1c4213794b503cb73026714e2eacc599368d780e75b8a2ea5bbccee5abead13dedd73
2ba5a394ce290647fd15e2dd339e442b9c6eda7abab1ec8c727db62bda68f73505d775b59ac76d345ffc4d78f388ba511666eadfb22c0d8691c033cc6963253925dfd6495832113941b961bf8392d15078fa687db22d2cbb67918071ac7d0feade4c7cac8f852159d04541d82cae3ffe6606ecede8bbc175b5e6d2c3ca2fce6f071aa484c8f0efc3c75c1fb4e0f4dbd7623ff8900b6fd6f30c6f1351303089f3e011bfed2059c0d92e6b11b2bf6c1e5205c2647cc6acc53acf17474a973c3b9af58470c047e0fb35b82ef78d08ba64b31be8734c6b70fd7083437def609e6eda305dab7817e32f547cc60d2387918855f3329503e722b226707c8184df194260e28a4c92906167b1d4a68db4df60c8e53cdbd13dfb41f8842c2e185f081950a6ac68478cd9d9a2e650cc1d023c230b64ec7eb7bfcdedaa3c08e41e4c93ecda6fc3ffed8992e07d49e87f216539070e4fe00ebaa2cbbe31d152d087d69e33d13ca4d57052c4e4c52c8351adfb4982b93d3f9d0bd443ccfae5999db6f0d55bf24a52067182a214561eb214742a49147b00340618d768e3d9d199bbd80c67bc91ab2975369dc5860fdf489562096fa57edd7b625b7cb0a198a4fd17d09441b2ae94a5da1d4f2463414fe14fb9ab1583a73bcdd1c94e545fcfebd0fd42bccc7d5bff4dd9e518b032805a35556173a730a21d4d892b96fcc27687f32729d6435815d9587c8b4f3874f569995c0dab3205aee7f65b7d507e6bfc2e425c49862901f4a071cb"], 0x6c}}, 0x400c004) (async) socket(0x10, 0x3, 0x0) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket$packet(0x11, 0x3, 0x300) (async) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) (async) sendmsg$nl_route(r1, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r3}}, 0x20}}, 0x0) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket$packet(0x11, 0x3, 0x300) (async) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) (async) sendmsg$nl_route(r4, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r6}}, 0x20}}, 0x0) (async) ioctl$sock_ipv4_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f00000000c0)={'syztnl2\x00', &(0x7f0000000040)={'syztnl1\x00', 0x0, 0x10, 0x7800, 0xffffffff, 0x4, {{0x18, 0x4, 0x2, 0x0, 0x60, 0x67, 0x0, 0x6, 0x29, 0x0, @dev={0xac, 0x14, 0x14, 0xa}, @dev={0xac, 0x14, 0x14, 0x23}, {[@generic={0x86, 0xc, "093f18c11ec2242b7b52"}, @ra={0x94, 0x4}, @timestamp_addr={0x44, 0x3c, 0x77, 0x1, 0x8, [{@private=0xa010100}, {@empty, 0x5fd3b561}, {@dev={0xac, 0x14, 0x14, 0x44}, 0x5}, {@empty, 0x2}, {@loopback, 0x2}, {@dev={0xac, 0x14, 0x14, 0x19}, 0x40}, {@empty}]}]}}}}}) (async) sendmsg$nl_route(r1, &(0x7f0000000200)={&(0x7f0000000000), 0xc, &(0x7f0000000100)={&(0x7f0000000180)=@newneigh={0x58, 0x1c, 0x2, 0x70bd2d, 0x25dfdbfb, {0xa, 0x0, 0x0, r6, 0x10, 0x4, 0x1}, [@NDA_DST_IPV6={0x14, 0x1, @remote}, @NDA_IFINDEX={0x8, 0x8, r7}, @NDA_VLAN={0x6, 0x5, 0x1}, @NDA_LLADDR={0xa, 0x2, @broadcast}, @NDA_DST_MAC={0xa, 0x1, @remote}]}, 0x58}, 0x1, 0x0, 0x0, 0x80}, 0x4004) (async) sendmmsg$alg(r0, &(0x7f0000000140), 0x4924b68, 0x0) (async) 19:46:46 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{}, {}, {}, {}, {}, {0x8, 0x6558, 0x2}}}}}}, 0x0) 19:46:46 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) 
sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x5d8d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2612.124082][T26225] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:46:46 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{}, {}, {}, {}, {}, {0x8, 0x6558, 0x2}}}}}}, 0x0) [ 2612.261734][T26225] bond1178: entered promiscuous mode [ 2612.286574][T26225] 8021q: adding VLAN 0 to HW filter on device bond1178 19:46:46 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{}, {}, {}, {}, {}, {0x8, 0x6558, 0x2}}}}}}, 0x0) 19:46:46 executing program 3: sendmsg$IPCTNL_MSG_EXP_NEW(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f00000002c0)={&(0x7f0000000100)=ANY=[@ANYBLOB="840100000002010600000000000000000600000a08000840000000040800054000000006080004400000000906000740000300000a000b00512e393331000000500003802c000180140003000000000000000000000000000000000114000400fc0200000000000000000000000000010c000280050001002f00000006000340000300000c0002800500010021000000f4000a80380002800c000280050001008400000006000340002000000c000280050001001100000014000180080001000000000008000200ac1414bbb80002800c000280050001008800000006000340000300000c00028005000100000000000c00028005000100000000002c00018014000300ff020000000000000000000000000001140004000000000000000000000000000000000106000340000400000c000280050001000100000006000700000000002c00018014000300fe8000000000000000000000000000bb14000400fc02000000000000000000000000000014000180080001000000000008000200e0000002"], 0x184}, 0x1, 0x0, 0x0, 0x80}, 0x10) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@void, @val={0x2, 0x80, 0x9, 0x400, 0xffff, 0x3}, @eth={@dev={'\xaa\xaa\xaa\xaa\xaa', 0xb}, @empty, @val={@val={0x9100, 0x1, 0x0, 0x4}, {0x8100, 0x7, 0x0, 0x1}}, {@mpls_mc={0x8848, {[{0x4, 0x0, 0x1}, {0x2}, {0x8}], @llc={@snap={0x0, 0x1, "ad", "8dbfda", 0x9100, "59128f6f77c56834eaccc823adaad150f7c4f8a9c2e3063e0fe383405ae851cfd484a0dd9fdcc4105925254a2b5147db72371312620f08a9f20c9a3b836056cbd8dbfeed8546b93a4ac4125c5f0bdf263301ae3a6a4e12"}}}}}}}, 0x8b) [ 2612.503567][T26226] bond1178: (slave bridge1081): making interface the new active one [ 2612.537247][T26226] bridge1081: entered promiscuous mode [ 2612.586605][T26226] bond1178: (slave bridge1081): Enslaving as an active interface with an up link 19:46:46 executing program 3: sendmsg$IPCTNL_MSG_EXP_NEW(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x8000000}, 0xc, 
&(0x7f00000002c0)={&(0x7f0000000100)=ANY=[@ANYBLOB="840100000002010600000000000000000600000a08000840000000040800054000000006080004400000000906000740000300000a000b00512e393331000000500003802c000180140003000000000000000000000000000000000114000400fc0200000000000000000000000000010c000280050001002f00000006000340000300000c0002800500010021000000f4000a80380002800c000280050001008400000006000340002000000c000280050001001100000014000180080001000000000008000200ac1414bbb80002800c000280050001008800000006000340000300000c00028005000100000000000c00028005000100000000002c00018014000300ff020000000000000000000000000001140004000000000000000000000000000000000106000340000400000c000280050001000100000006000700000000002c00018014000300fe8000000000000000000000000000bb14000400fc02000000000000000000000000000014000180080001000000000008000200e0000002"], 0x184}, 0x1, 0x0, 0x0, 0x80}, 0x10) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@void, @val={0x2, 0x80, 0x9, 0x400, 0xffff, 0x3}, @eth={@dev={'\xaa\xaa\xaa\xaa\xaa', 0xb}, @empty, @val={@val={0x9100, 0x1, 0x0, 0x4}, {0x8100, 0x7, 0x0, 0x1}}, {@mpls_mc={0x8848, {[{0x4, 0x0, 0x1}, {0x2}, {0x8}], @llc={@snap={0x0, 0x1, "ad", "8dbfda", 0x9100, "59128f6f77c56834eaccc823adaad150f7c4f8a9c2e3063e0fe383405ae851cfd484a0dd9fdcc4105925254a2b5147db72371312620f08a9f20c9a3b836056cbd8dbfeed8546b93a4ac4125c5f0bdf263301ae3a6a4e12"}}}}}}}, 0x8b) sendmsg$IPCTNL_MSG_EXP_NEW(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f00000002c0)={&(0x7f0000000100)=ANY=[@ANYBLOB="840100000002010600000000000000000600000a08000840000000040800054000000006080004400000000906000740000300000a000b00512e393331000000500003802c000180140003000000000000000000000000000000000114000400fc0200000000000000000000000000010c000280050001002f00000006000340000300000c0002800500010021000000f4000a80380002800c000280050001008400000006000340002000000c000280050001001100000014000180080001000000000008000200ac1414bbb80002800c000280050001008800000006000340000300000c00028005000100000000000c00028005000100000000002c00018014000300ff020000000000000000000000000001140004000000000000000000000000000000000106000340000400000c000280050001000100000006000700000000002c00018014000300fe8000000000000000000000000000bb14000400fc02000000000000000000000000000014000180080001000000000008000200e0000002"], 0x184}, 0x1, 0x0, 0x0, 0x80}, 0x10) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@void, @val={0x2, 0x80, 0x9, 0x400, 0xffff, 0x3}, @eth={@dev={'\xaa\xaa\xaa\xaa\xaa', 0xb}, @empty, @val={@val={0x9100, 0x1, 0x0, 0x4}, {0x8100, 0x7, 0x0, 0x1}}, {@mpls_mc={0x8848, {[{0x4, 0x0, 0x1}, {0x2}, {0x8}], @llc={@snap={0x0, 0x1, "ad", "8dbfda", 0x9100, "59128f6f77c56834eaccc823adaad150f7c4f8a9c2e3063e0fe383405ae851cfd484a0dd9fdcc4105925254a2b5147db72371312620f08a9f20c9a3b836056cbd8dbfeed8546b93a4ac4125c5f0bdf263301ae3a6a4e12"}}}}}}}, 0x8b) (async) [ 2612.634576][T26235] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:46:46 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3c000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2612.717123][T26235] bond1103: entered promiscuous mode [ 2612.723017][T26235] 8021q: adding VLAN 0 to HW filter on device bond1103 19:46:46 executing program 3: sendmsg$IPCTNL_MSG_EXP_NEW(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f00000002c0)={&(0x7f0000000100)=ANY=[@ANYBLOB="840100000002010600000000000000000600000a08000840000000040800054000000006080004400000000906000740000300000a000b00512e393331000000500003802c000180140003000000000000000000000000000000000114000400fc0200000000000000000000000000010c000280050001002f00000006000340000300000c0002800500010021000000f4000a80380002800c000280050001008400000006000340002000000c000280050001001100000014000180080001000000000008000200ac1414bbb80002800c000280050001008800000006000340000300000c00028005000100000000000c00028005000100000000002c00018014000300ff020000000000000000000000000001140004000000000000000000000000000000000106000340000400000c000280050001000100000006000700000000002c00018014000300fe8000000000000000000000000000bb14000400fc02000000000000000000000000000014000180080001000000000008000200e0000002"], 0x184}, 0x1, 0x0, 0x0, 0x80}, 0x10) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@void, @val={0x2, 0x80, 0x9, 0x400, 0xffff, 0x3}, @eth={@dev={'\xaa\xaa\xaa\xaa\xaa', 0xb}, @empty, @val={@val={0x9100, 0x1, 0x0, 0x4}, {0x8100, 0x7, 0x0, 0x1}}, {@mpls_mc={0x8848, {[{0x4, 0x0, 0x1}, {0x2}, {0x8}], @llc={@snap={0x0, 0x1, "ad", "8dbfda", 0x9100, "59128f6f77c56834eaccc823adaad150f7c4f8a9c2e3063e0fe383405ae851cfd484a0dd9fdcc4105925254a2b5147db72371312620f08a9f20c9a3b836056cbd8dbfeed8546b93a4ac4125c5f0bdf263301ae3a6a4e12"}}}}}}}, 0x8b) [ 2612.783878][T26239] bond1103: (slave bridge1033): making interface the new active one [ 2612.792075][T26239] bridge1033: entered promiscuous mode [ 2612.805383][T26239] bond1103: (slave bridge1033): Enslaving as an active interface with an up link [ 2612.815805][T26241] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2612.895282][T26241] bond1217: entered promiscuous mode [ 2612.907933][T26241] 8021q: adding VLAN 0 to HW filter on device bond1217 19:46:47 executing program 0: socketpair$unix(0x1, 0x3, 0x0, &(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) sendmmsg$unix(r1, &(0x7f00000bd000), 0x318, 0x0) ioctl$int_in(r1, 0x5452, &(0x7f0000000000)=0x1) close(r0) socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000080)=@bridge_dellink={0x6c, 0x11, 0x704, 0x70bd2b, 0x25dfdbfd, {0x7, 0x0, 0x0, 0x0, 0xc820, 0x40}, [@IFLA_EVENT={0x8, 0x2c, 0x1c42}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_CARRIER_CHANGES={0x8, 0x23, 0x6}, @IFLA_BROADCAST={0xa, 0x2, @remote}, @IFLA_PHYS_PORT_ID={0x20, 0x22, "34ad5ef7ed2eb3116fbc0fdbab0516ce5ae1fed050adf60508e14a3e"}]}, 0x6c}, 0x1, 0x0, 0x0, 0x1}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000080)=@bridge_dellink={0x6c, 0x11, 0x704, 0x70bd2b, 0x25dfdbfd, {0x7, 0x0, 0x0, 0x0, 0xc820, 0x40}, [@IFLA_EVENT={0x8, 0x2c, 0x1c42}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_CARRIER_CHANGES={0x8, 0x23, 0x6}, @IFLA_BROADCAST={0xa, 0x2, @remote}, @IFLA_PHYS_PORT_ID={0x20, 0x22, "34ad5ef7ed2eb3116fbc0fdbab0516ce5ae1fed050adf60508e14a3e"}]}, 0x6c}, 0x1, 0x0, 0x0, 0x1}, 0x0) socket$nl_generic(0x10, 0x3, 0x10) (async) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = bpf$ITER_CREATE(0x21, &(0x7f00000001c0), 0x8) sendmsg$nl_route_sched(r4, &(0x7f0000000480)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x22582000}, 0xc, &(0x7f0000000440)={&(0x7f0000000280)=@delqdisc={0x1a8, 0x25, 0x4, 0x70bd27, 0x25dfdbfd, {0x0, 0x0, 0x0, 0x0, {0x9, 0x9}, {0x4e58d517c9d9cd, 0xfff1}, {0xffe0, 0xa}}, [@qdisc_kind_options=@q_fq_codel={{0xd}, {0x14, 0x2, [@TCA_FQ_CODEL_TARGET={0x8, 0x1, 0xfffffff7}, @TCA_FQ_CODEL_FLOWS={0x8, 0x5, 0x2}]}}, @TCA_STAB={0x2c, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x5, 0x3, 0x6c, 0x0, 0x0, 0x200, 0x101, 0x3}}, {0xa, 0x2, [0x7, 0x7, 0x3f]}}]}, @TCA_RATE={0x6, 0x5, {0x81, 0x9}}, @TCA_STAB={0x58, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x40, 0x0, 0xc39, 0x1, 0x3, 0x7, 0xffb, 0x7}}, {0x12, 0x2, [0x1, 0x3, 0x2, 0x9, 0x3ff, 0xcc7, 0x20]}}, {{0x1c, 0x1, {0x0, 0x40, 0x2, 0xffffb07f, 0x1, 0x2, 0xffffff00, 0x1}}, {0x6, 0x2, [0x6cf]}}]}, @TCA_EGRESS_BLOCK={0x8, 0xe, 0x9c}, @TCA_STAB={0x78, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x7, 0x99, 0x400, 0x9, 0x2, 0x6, 0x6, 0x1}}, {0x6, 0x2, [0x3]}}, {{0x1c, 0x1, {0x2, 0x18, 0x2, 0x1357c6bf, 0x1, 0x400, 0x8000, 0x2}}, {0x8, 0x2, [0xee4, 0x9]}}, {{0x1c, 0x1, {0x7, 0x80, 0x1ff, 0x3, 0x1, 0xf7, 0xe5b4, 0x6}}, {0x10, 0x2, [0x0, 0x7, 0x1, 0x0, 0x401, 0x80]}}]}, @TCA_EGRESS_BLOCK={0x8, 0xe, 0x3}, @qdisc_kind_options=@q_dsmark={{0xb}, {0x38, 0x2, [@TCA_DSMARK_INDICES={0x6, 0x1, 0x3a}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x8000}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x1}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x8}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0xffff}, @TCA_DSMARK_SET_TC_INDEX={0x4}]}}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0xfa04}]}, 0x1a8}, 0x1, 0x0, 0x0, 0x8000}, 0x811) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 
0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendmsg$NL80211_CMD_STOP_AP(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x20, r5, 0x804, 0x70bd28, 0x25dfdbfb, {{}, {@void, @val={0xc, 0x99, {0x9, 0x27}}}}, ["", "", ""]}, 0x20}, 0x1, 0x0, 0x0, 0x20040818}, 0x800) [ 2612.982018][T26247] bond1217: (slave bridge1147): making interface the new active one [ 2612.990242][T26247] bridge1147: entered promiscuous mode [ 2613.002285][T26247] bond1217: (slave bridge1147): Enslaving as an active interface with an up link 19:46:47 executing program 2: sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000700)=ANY=[@ANYBLOB="6c0000001000010400"/20, @ANYRES8, @ANYBLOB="0526060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f1900000600c9d60000000008000a00a8", @ANYRESOCT=0x0, @ANYBLOB="2ccd13158772c364a7cf17d6c911a05a0b353e91f169d98ade4cead5a8fc78af5f2bc3200eeb31834404066dc3251e18e45a0ffa71d2ef3ce9cd20f72e625a59a9ba302b0b086782e0096356df1df5416cb10f19610f5a81362b100706e14f758a31dd473378c8a20cb3c6c191e0700d404ecfca4ce3773cb92f16c3841c479bee5effd25e37e3429c02ab2e38eee7b0fe16029e902cbd435d2ac45ed8d6207edbf5062bc7945ec7a1221176f4aaae644c0664b57f3b458c31e2b81dc1ab9beb7c24ebe767e04e8a6c5d2b888dc9f1c5c0e2c8e548baf275e0f53cb330cb9ca107215c9857d56cbe9f0b08d4b3f50e4993dd43afa2fd9e01b7e324ce9c4cfdbb62421108a9f7c7ddc4c3241a0f1698f4d0a114c305077fe20b01e1bcb23132a23d2cbdd8ef080addc94984a08b3f1f2de3e4a1efc186c8c04116443d3301d7023465797a1fde6f9326e9b0c4ecff4b4ee58919c5900c2d193f0f3fe964a8e467d648506ba8b9d79c7751c2c61bffb9a503abe13aa37c3f6de2879c05999a24c226f5c55bcd7631c92a041fc8e60db47529dd8978f77dc94c2cafa375d1cb78803035b9b78b97d3a83d417803d6fd7a2e5e5bdcff234a044980ce619b7804aa15a95ad0c4f5eaf7aed0d55907ffb0502d1424d485eac02477d917608001146ad62ead28a43d56f7f08a71a91451821d4533ea14c0beb47d9466f2ffcc4a5b84dab695a4a03d79b8736217419fc4b6912952e7b14bd8946f5da70a466166ef0afd8a21db37c9aaddfadc7136efe216e44a23952b39fedc796f2bb06c6048ed133d6887df953a96abc3ead5997ed442a0f9bed1f3db599c1fcf02d88bd16cfb85771bf99ea0375471d5e957e508fb0502ef9c14da41d48144b6e961266bc394af18d0c7cbf025d5008472e8f4602bb8b2081d35ef362ebdb21a964afb4d2a40675d30590994805f00aa5690068393c70ce532404e1ac7e61909d44e487ee4267205603ca20f78ebec10fc468ff27ce912eb6479241ca76d26b1dde798de23464b3f8c13f732bcce03c07a8546986ee9c468ec314cb9d0fa9a541b4fcf81eb7200f83a3a16db551d7c1eb77768f477f8bfd0e11a4a24dc8205ea561a34034a60a10d4e3dcf192b271edbbbd292d32a57b4cadf520623f1bb20bdef192467bb7eb8eb6c802d830e6a9b187a338420555636a61bac368193d99862a60ea336cbd1fd023ccfd99dd22079e49226f39be804ef551930bca3a7b7367dd6ca1398392ea7401b5aedc94adf5f0dff6d02b2abe2c293fc72b68d76ee95429ab626286102bae76d96b00e149d50d804460ee74d768e4ce6491f9aa0ae783fb9957a6a2b772ee22e82abf6d49e82905a9714647e9dcb75b04176468bb758cf97994c298ca0ab38e029d4f192d93650b5fdaf7fecd8bc741d158ceeb0861b306f1efa1864e769f59ea5e9d851a4a3d8959453a67e65a15f54173939255966fced1812e0f8220e692ab1de400ed331d6c0b02234b39b8eb53123c3b9da4f9ff8f31b5393e8942ee37808f6437ae8acab2e992e76e9d8c4ecd68040ec378bff010a7900cf7bda31ed3bbf4c74fb4c4cfb79e8ad68ec5cfb50eaee1bb232427d20eed887598af2027a76f467854e84b113ee0dbaf6af1d79cad64794eba6c34e8bbd6cce4aa0be202b1adb08f02421b180173435fa6a02fc79b8617109ab346c52b18f187be3d2eeaa4585e957ee118361979e9d756f2e67cbf708c47f7819556bf18103d8f92eb69c3d7c329c1c8c3bcab78cf977e23ea8c86ecf5f03af45f3cd9bb14bdac05336edce26aa0d4eb13071c076a18ddf71cc9c35ecc0b0be6aad7826de1337ccf1
6b019cf726b124126963437c0a8eca0a0d72e24fce86ae34840fbd588f467affda1bc929568088de10197d3ab3c17cadfc1a15c4c1c73d153dc7fafe1a19e9ead1fe26ec5c9509b3aab679736d78c83baa130786e328f70036a316aeebf8c88e6a4a8eb90746da501e38f281acb2b13fbce1cc7c1858406f42a12c137476756709e54a03caa25642dbc8be428c23a1d7643cd7515d9cc9bc84588fba6b00cc6805384992ff442a2419ff79a42a0080d32707da0128910e248e5a2caa36d6a4176f8325e5d20f5fd82754d9f333aebbbbc1b20ff409a88ab2ced681deb45b7a9c25165e1ede74a0ff1ef814a99898504e9f1d36edc14abbc7f1e9063ae1cd050e54fa1fe0e3954f40fa070f3b4391d597661c43133db2687ce101fd90a6972e7542ed9bb641615fe5853a0c6dd87b1e3773f73156180706a30e8038785218a9e6a29ac0ff9c136be1edd35c4da4997f8a4925165f2581ef70b097cf8b53659ad6a4d5c5e6c7f2fc28d7f65338e29f9357439c6e5173e2967948125445149546df2334e9c3c09340ed1a713b5b895e4e3953e0668bf0022e5643e642bf1207fed3c49ffb24c558e816b8fe413c22575dc8b8ef2664453187a74408d700feb5f2824d9efc41877716c0387d67e7a0e6f41de145cab9d178d2ae63e5c0a73ed46cdbe6db8e17e52dca3c32e50bb7d91f5d7615de78d02889a90d52cf872c257c6b3ef9e5a22044af5c3e298888a4b050fb6cd94b03e4605cafa5abec8813133dc08b33a49875af593ea447998df60888d6d73eb0bdfbbaf0a753b220eb584798cf5424411edb6366b83615cef6da0631897583d88781b05a7d185914b26f85718aea8be61c3c114a9458d4f414d91a1d70a7498624946d75278137954fe37e5d9e6e730dc5c4d1fcc0a793c64c0807914243fa39f8ab8afea87db514705674724820387accc221fab6d90dbd59b037da43a8e6cc26df4cbc591fbaceac2d34ebe5c81247a92eab1f82a5e0378984b2b0f5af63191b149ac8e658626590dc5d154dfe032c48955c27baae7622284dcbe0abd117be2cebb3afea4e94c19f76b8a58762ae5e71771452aec20dcb74b22da54f9346d3e7cbb253d4eb985532bf7b55686aefff30d5f3c035d6f984dfbcc1cef8813d4298a21d3a0c0d7ac87742528225fcee9ccf9c705af0c54bc7ca84f098e76f715b2fe0261f2da9201b09d7a70b40dd70a2b96b16009a7820694ff5015a08f106cff95aef79c7755ad184bb714993b295e53d65f5cd5aacbb4302285f9c941266a463ddcbd8a0ce27cb3e9e20b078e5debb3895bef6df9651116214eff3fd92454ed9f243106a6670dc48ad16d2b1e77dacb2edd149e430160e068fccc843296481981c104298cab7347d9e19f99018e44ddcdba7dab500c99dc0193aac4ea1de69b839988b7efe94b9e23a55c9b3707113e2f2449cc6a24dd6fec2cd156e4052d5c3b6a99bfaf2cae4cc736820279c3142156e42c8140c68003dd3a570724aafbab9014dc56e98bbba2b1e902eaadf5ccb5a7ed0e8763ef2857f5161b0f83bda666cf72619684295d3f3ef005bd6e054137c738f825e5631a8c0ba28a2771a29738719c7c5a54664a87d6233c1e53c969f44a9b42e1e0e0ee7c88c434720d16353e1f7569c200dcca69c49c02ef0b8548580c743830fb59d1da544d0a926fbbd663ce8ef3c0f39aa0380e7867ade8878628beb3cd7693f5a85b663cb8679fa6ea2402062f9ca1cc565f184ec6a37b6372a456a19d0e92b91033d852f5b3c8a51dcf867d950586a1539f3358c82cc871bab2bd9334e648bbbed69191498719a3c97f32faf9dbb8071f90333aeb3dd8187fb036ed62b41db2a53687f235dcf8bb64c57af3628207772f8dc9ae9f26e7676f85016290873e5bbe308f7fe3cffc29e89b70e2dba370bff92fb38f6e913a79a4cde814ad4db750e0986219550ee1db33bde2ec7636df7ac64673b0f900d67195a497adafd9c9fca44653f312b42237f2f4162e3c03b98d7862a1a0f69430e6d33ef515246626cd4972a8ce1a79d62148365a6e7712d2fd1a7f54c4f1087969e76d152e88ab42fa2f9f982428f6ce45871bd7a65034dc310df4027ac5e390fe0ac0836edbda58560c88d5121807d9d5b9a071142b99f10f7cfa25d35a7411b458972b75eb9e6aa7c784ee328891412d5f59d6eea9eb4d567049b7febc680ef47ef84c57894ced18e7789cffbf1dd4202201813b06ff6f80fa7f18278b375b3d89327fe8aeb29ea143212164738934ddf2f31e8a1a806089e9d1e5dc0b2ac70d5817cfcabf35d84e1fe55c94d6529657c5d03871d44c7a65ada7cee41bded205fc9a921f6db70d6f7ea380dab82dabd22d25c963f48955859f662f56ddb0b9fefdaad114a5aff2d1d57d78bcb07a41e75da2a156abb89f740c8872ae8b887f27590f7f42945c85fd891611defc72ab00a259099188dd019d8205c2c4327cb03839c1a72a883125babd96c846fcd2f9c62c
1a45ed20eca722bfc124e7f1239d122dc624b94516e3c2e9c1472075ca3ff81817161f36df290adfd60d45e07128d7460e0da4653dd0f5a1d67660565c1ee91870fe7be42b0943780fc753a32a8146c478b31e49a9a87fc4d2d21deb53ec690d1fb014ee3563e2408679f9913bfaff80a0965370633c2f608b733f5314ce907c9b02b9a6673ce7ee431037d2be6b16180ccd9b25f1ff1f9c35a9f4c1149a36cb679e04a1752e2cfd48521ff1af851b572e8869eb32d513b61ed61326d9daa29a6e9dacc8166ffbf198fce74deaf3e6a535c75ea46319700c7e91c7bd7d53b2d54bebe57973c74c43fcc71aefd2f440a539f6a96b52c9a5d3d5f8d58864a4b32c5fefe32e31be9e1d6f37cedb3db9dcf5212f6a4347b9f095588f3f51d744e54888249bcc2813fe324e964838ef3417652d45aeba89c958aa67fa263cbeb6aff32db45dd9317eeef9a4513c43f74a19b9abdfca463c7105dc5d11ef6b7d004af7358ceea84ab92579eaa63c91fd08350028859ba348dd2b772ec9029ae2595a49679f303ebdb9ac69187a3f9752193d88c8e28bf6838024607b42df55a6de8d27a947d48a8572f99eb45d253c1c4213794b503cb73026714e2eacc599368d780e75b8a2ea5bbccee5abead13dedd732ba5a394ce290647fd15e2dd339e442b9c6eda7abab1ec8c727db62bda68f73505d775b59ac76d345ffc4d78f388ba511666eadfb22c0d8691c033cc6963253925dfd6495832113941b961bf8392d15078fa687db22d2cbb67918071ac7d0feade4c7cac8f852159d04541d82cae3ffe6606ecede8bbc175b5e6d2c3ca2fce6f071aa484c8f0efc3c75c1fb4e0f4dbd7623ff8900b6fd6f30c6f1351303089f3e011bfed2059c0d92e6b11b2bf6c1e5205c2647cc6acc53acf17474a973c3b9af58470c047e0fb35b82ef78d08ba64b31be8734c6b70fd7083437def609e6eda305dab7817e32f547cc60d2387918855f3329503e722b226707c8184df194260e28a4c92906167b1d4a68db4df60c8e53cdbd13dfb41f8842c2e185f081950a6ac68478cd9d9a2e650cc1d023c230b64ec7eb7bfcdedaa3c08e41e4c93ecda6fc3ffed8992e07d49e87f216539070e4fe00ebaa2cbbe31d152d087d69e33d13ca4d57052c4e4c52c8351adfb4982b93d3f9d0bd443ccfae5999db6f0d55bf24a52067182a214561eb214742a49147b00340618d768e3d9d199bbd80c67bc91ab2975369dc5860fdf489562096fa57edd7b625b7cb0a198a4fd17d09441b2ae94a5da1d4f2463414fe14fb9ab1583a73bcdd1c94e545fcfebd0fd42bccc7d5bff4dd9e518b032805a35556173a730a21d4d892b96fcc27687f32729d6435815d9587c8b4f3874f569995c0dab3205aee7f65b7d507e6bfc2e425c49862901f4a071cb"], 0x6c}}, 0x400c004) (async) r0 = socket(0x10, 0x3, 0x0) (async) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r1, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r3}}, 0x20}}, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) r5 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r4, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=@newlink={0x20, 0x11, 0x40d, 0x0, 0x0, {0x10, 0x0, 0x0, r6}}, 0x20}}, 0x0) (async, rerun: 64) ioctl$sock_ipv4_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f00000000c0)={'syztnl2\x00', &(0x7f0000000040)={'syztnl1\x00', 0x0, 0x10, 0x7800, 0xffffffff, 0x4, {{0x18, 0x4, 0x2, 0x0, 0x60, 0x67, 0x0, 0x6, 0x29, 0x0, @dev={0xac, 0x14, 0x14, 0xa}, @dev={0xac, 0x14, 0x14, 0x23}, {[@generic={0x86, 0xc, "093f18c11ec2242b7b52"}, @ra={0x94, 0x4}, @timestamp_addr={0x44, 0x3c, 0x77, 0x1, 0x8, [{@private=0xa010100}, {@empty, 0x5fd3b561}, {@dev={0xac, 0x14, 0x14, 0x44}, 0x5}, {@empty, 0x2}, {@loopback, 0x2}, {@dev={0xac, 0x14, 0x14, 0x19}, 0x40}, {@empty}]}]}}}}}) (rerun: 64) sendmsg$nl_route(r1, &(0x7f0000000200)={&(0x7f0000000000), 0xc, &(0x7f0000000100)={&(0x7f0000000180)=@newneigh={0x58, 0x1c, 0x2, 0x70bd2d, 0x25dfdbfb, {0xa, 0x0, 0x0, r6, 0x10, 0x4, 0x1}, 
[@NDA_DST_IPV6={0x14, 0x1, @remote}, @NDA_IFINDEX={0x8, 0x8, r7}, @NDA_VLAN={0x6, 0x5, 0x1}, @NDA_LLADDR={0xa, 0x2, @broadcast}, @NDA_DST_MAC={0xa, 0x1, @remote}]}, 0x58}, 0x1, 0x0, 0x0, 0x80}, 0x4004) sendmmsg$alg(r0, &(0x7f0000000140), 0x4924b68, 0x0) 19:46:47 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x5e8d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:47 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3c000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:47 executing program 3: syz_emit_ethernet(0x184, &(0x7f0000000080)={@link_local, @random="b6371b9000", @void, {@ipv4={0x800, @gre={{0xa, 0x4, 0x0, 0x0, 0x176, 0x67, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @broadcast, {[@timestamp_prespec={0x44, 0xa, 0x25, 0x3, 0x4, [{@empty, 0x7fffffff}, {@multicast2, 0xd7}]}]}}, {{0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x880b, 0x0, 0x0, [0x0]}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800, [0x1000, 0xffe0, 0xa937], "692c60aa08f447bfea3f0fa96fa3cb9aea5b84c6631bcec5a70758c3dcb0bb2e48ba7e6981355df20b7a4b2e0ab57c14f84400cea61d3c5629bd383168a64b7306c1f7465b32210192f28b1b9ac9b2ff12d59e196b9906378bc8a17b13e9d868b58ce0df419de9c21f041876333d7937603f500b853e78c08436"}, {}, {}, {}, {0x8, 0x6558, 0x0, "4c301d5383f0a353f5416e978c8975df2c641551ccc763cda76b25c528f168253682c162553440c157f7c4a476acfdf5d46e9fb421bec6123b06c2e21756ffa1b7054e40d527bc9fe28c3027bf7f59c0c7a3f28f4f8c745efa4d548f9dd91f7da7d4cf8fda5c2a62bc83589e0f2e38b018952034f5afc47b15d303644528f98cc22f77757275a129ad6b4ea891c56e21"}}}}}}, 0x0) syz_emit_ethernet(0x22, &(0x7f0000000000)={@local, @link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0x3}, @val={@void, {0x8100, 0x1, 0x0, 0x3}}, {@can={0xc, {{0x3, 0x0, 0x1, 0x1}, 0x5, 0x1, 0x0, 0x0, "a4de109883d70edf"}}}}, &(0x7f0000000040)={0x0, 0x2, [0xa9, 0xd3, 0x8e1, 0x4c5]}) [ 2613.118708][T26266] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:46:47 executing program 3: syz_emit_ethernet(0x184, &(0x7f0000000080)={@link_local, @random="b6371b9000", @void, {@ipv4={0x800, @gre={{0xa, 0x4, 0x0, 0x0, 0x176, 0x67, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @broadcast, {[@timestamp_prespec={0x44, 0xa, 0x25, 0x3, 0x4, [{@empty, 0x7fffffff}, {@multicast2, 0xd7}]}]}}, {{0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x880b, 0x0, 0x0, [0x0]}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800, [0x1000, 0xffe0, 0xa937], "692c60aa08f447bfea3f0fa96fa3cb9aea5b84c6631bcec5a70758c3dcb0bb2e48ba7e6981355df20b7a4b2e0ab57c14f84400cea61d3c5629bd383168a64b7306c1f7465b32210192f28b1b9ac9b2ff12d59e196b9906378bc8a17b13e9d868b58ce0df419de9c21f041876333d7937603f500b853e78c08436"}, {}, {}, {}, {0x8, 0x6558, 0x0, "4c301d5383f0a353f5416e978c8975df2c641551ccc763cda76b25c528f168253682c162553440c157f7c4a476acfdf5d46e9fb421bec6123b06c2e21756ffa1b7054e40d527bc9fe28c3027bf7f59c0c7a3f28f4f8c745efa4d548f9dd91f7da7d4cf8fda5c2a62bc83589e0f2e38b018952034f5afc47b15d303644528f98cc22f77757275a129ad6b4ea891c56e21"}}}}}}, 0x0) (async) syz_emit_ethernet(0x22, &(0x7f0000000000)={@local, @link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0x3}, @val={@void, {0x8100, 0x1, 0x0, 0x3}}, {@can={0xc, {{0x3, 0x0, 0x1, 0x1}, 0x5, 0x1, 0x0, 0x0, "a4de109883d70edf"}}}}, &(0x7f0000000040)={0x0, 0x2, [0xa9, 0xd3, 0x8e1, 0x4c5]}) [ 2613.335446][T26266] bond1179: entered promiscuous mode [ 2613.344253][T26266] 8021q: adding VLAN 0 to HW filter on device bond1179 19:46:47 executing program 3: syz_emit_ethernet(0x184, &(0x7f0000000080)={@link_local, @random="b6371b9000", @void, {@ipv4={0x800, @gre={{0xa, 0x4, 0x0, 0x0, 0x176, 0x67, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @broadcast, {[@timestamp_prespec={0x44, 0xa, 0x25, 0x3, 0x4, [{@empty, 0x7fffffff}, {@multicast2, 0xd7}]}]}}, {{0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x880b, 0x0, 0x0, [0x0]}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x800, [0x1000, 0xffe0, 0xa937], "692c60aa08f447bfea3f0fa96fa3cb9aea5b84c6631bcec5a70758c3dcb0bb2e48ba7e6981355df20b7a4b2e0ab57c14f84400cea61d3c5629bd383168a64b7306c1f7465b32210192f28b1b9ac9b2ff12d59e196b9906378bc8a17b13e9d868b58ce0df419de9c21f041876333d7937603f500b853e78c08436"}, {}, {}, {}, {0x8, 0x6558, 0x0, "4c301d5383f0a353f5416e978c8975df2c641551ccc763cda76b25c528f168253682c162553440c157f7c4a476acfdf5d46e9fb421bec6123b06c2e21756ffa1b7054e40d527bc9fe28c3027bf7f59c0c7a3f28f4f8c745efa4d548f9dd91f7da7d4cf8fda5c2a62bc83589e0f2e38b018952034f5afc47b15d303644528f98cc22f77757275a129ad6b4ea891c56e21"}}}}}}, 0x0) (async) syz_emit_ethernet(0x22, &(0x7f0000000000)={@local, @link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0x3}, @val={@void, {0x8100, 0x1, 0x0, 0x3}}, {@can={0xc, {{0x3, 0x0, 0x1, 0x1}, 0x5, 0x1, 0x0, 0x0, "a4de109883d70edf"}}}}, &(0x7f0000000040)={0x0, 0x2, [0xa9, 0xd3, 0x8e1, 0x4c5]}) [ 2613.561163][T26267] bond1179: (slave bridge1082): making interface the new active one [ 2613.584495][T26267] bridge1082: entered promiscuous mode [ 2613.605026][T26267] bond1179: (slave bridge1082): Enslaving as an active interface with an up link 19:46:47 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3f000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2613.645995][T26272] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:46:47 executing program 3: ioctl$SIOCRSGL2CALL(0xffffffffffffffff, 0x89e5, &(0x7f0000000040)=@netrom) sendmsg$NL80211_CMD_SET_NOACK_MAP(0xffffffffffffffff, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000080)={&(0x7f0000000140)=ANY=[@ANYBLOB='\x00\x00\x00\x00', @ANYRES16=0x0, @ANYBLOB="7572a779fc959b0848bd32d5174cc1c0780643f3af2b2912000325bd7000fbdbdf255700000005e59500ff7e00009c410bfbe893abe9c3efcfa386b2883234b5afd07846ed82c09bcfd24da8701157e62cba342785ff4b5aa203234e5714475460de0ee281ccfa3a7e05fe2d02524d3c5dc3c0d640727cd196a220560f094931beb0f386e42b738ab18e02d0b0d72841e66f458de2bd1b8c01c331fc1829ad98ee95f8e41d5979ede4999d97462fc4f82260e232b7"], 0x1c}, 0x1, 0x0, 0x0, 0x20000000}, 0x28051) r0 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000200), 0xffffffffffffffff) socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f00000002c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)={0x14, r0, 0x2, 0x70bd28, 0x25dfdbff, {{}, {@void, @void}}, ["", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x400e0}, 0x0) [ 2613.766054][T26272] bond1104: entered promiscuous mode [ 2613.771855][T26272] 8021q: adding VLAN 0 to HW filter on device bond1104 19:46:47 executing program 3: ioctl$SIOCRSGL2CALL(0xffffffffffffffff, 0x89e5, &(0x7f0000000040)=@netrom) (async) sendmsg$NL80211_CMD_SET_NOACK_MAP(0xffffffffffffffff, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000080)={&(0x7f0000000140)=ANY=[@ANYBLOB='\x00\x00\x00\x00', @ANYRES16=0x0, @ANYBLOB="7572a779fc959b0848bd32d5174cc1c0780643f3af2b2912000325bd7000fbdbdf255700000005e59500ff7e00009c410bfbe893abe9c3efcfa386b2883234b5afd07846ed82c09bcfd24da8701157e62cba342785ff4b5aa203234e5714475460de0ee281ccfa3a7e05fe2d02524d3c5dc3c0d640727cd196a220560f094931beb0f386e42b738ab18e02d0b0d72841e66f458de2bd1b8c01c331fc1829ad98ee95f8e41d5979ede4999d97462fc4f82260e232b7"], 0x1c}, 0x1, 0x0, 0x0, 0x20000000}, 0x28051) (async) r0 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000200), 0xffffffffffffffff) socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f00000002c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)={0x14, r0, 0x2, 0x70bd28, 0x25dfdbff, {{}, {@void, @void}}, ["", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x400e0}, 0x0) [ 2613.979378][T26283] bond1104: (slave bridge1034): making interface the new active one [ 2614.013376][T26283] bridge1034: entered promiscuous mode 19:46:48 executing program 3: ioctl$SIOCRSGL2CALL(0xffffffffffffffff, 0x89e5, &(0x7f0000000040)=@netrom) (async) sendmsg$NL80211_CMD_SET_NOACK_MAP(0xffffffffffffffff, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000080)={&(0x7f0000000140)=ANY=[@ANYBLOB='\x00\x00\x00\x00', @ANYRES16=0x0, 
@ANYBLOB="7572a779fc959b0848bd32d5174cc1c0780643f3af2b2912000325bd7000fbdbdf255700000005e59500ff7e00009c410bfbe893abe9c3efcfa386b2883234b5afd07846ed82c09bcfd24da8701157e62cba342785ff4b5aa203234e5714475460de0ee281ccfa3a7e05fe2d02524d3c5dc3c0d640727cd196a220560f094931beb0f386e42b738ab18e02d0b0d72841e66f458de2bd1b8c01c331fc1829ad98ee95f8e41d5979ede4999d97462fc4f82260e232b7"], 0x1c}, 0x1, 0x0, 0x0, 0x20000000}, 0x28051) (async) r0 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000200), 0xffffffffffffffff) socket$nl_netfilter(0x10, 0x3, 0xc) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f00000002c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)={0x14, r0, 0x2, 0x70bd28, 0x25dfdbff, {{}, {@void, @void}}, ["", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x400e0}, 0x0) [ 2614.050880][T26283] bond1104: (slave bridge1034): Enslaving as an active interface with an up link 19:46:48 executing program 2: r0 = socket$inet6_udp(0xa, 0x2, 0x0) getsockopt$IP_VS_SO_GET_VERSION(r0, 0x0, 0x480, &(0x7f0000000040), &(0x7f0000000080)=0x40) sendmsg$NL80211_CMD_SET_MESH_CONFIG(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000140)=ANY=[@ANYBLOB="07af0000594c0da984d441c19ad78cf0b6e3c433ff0705a644acc56c2aa076b7a4116c1359832b2e27e83d635ef7e0db7f70cbd2a72f12ddb50565152d08e8853b85ae9f7f05712b98a28dc896701d5f2d5dab2d0c2c950933a7df012bebaaa766b95aac7685f8f6628daab45e0bdd28c560486582af0b2f07d2cbac3af4ff58269e0300000000000000fbe486e8a1130cc8d9a3a9257dfd80ee2fa258dca4c911aa2a3c5759f072f45088bcfd964a7e98252ddcc84e5aeca6da842e203e9cc1f3bf744a34ac9d1915ffb183f1c448823cd05e7c740d87e12a9aeac176ceb7fdb28ba2519e2dbff5dce308847974bd2512a39ddfc5828404b1b2e0", @ANYRES32=0x0, @ANYBLOB="020025bd700001dcdf251d00000008000300", @ANYRES16, @ANYBLOB="060099004000000020e83ba54b0001005b00000044002380060003001100fc000800150067000000080017000100000006000400390000e54b0c096d733c25692d820006000d004753000008001c00010000000600c0d3f6beec59a90c0000"], 0x74}, 0x1, 0x0, 0x0, 0x2400086e}, 0x80) unshare(0x44040600) unshare(0x44020200) unshare(0x400) unshare(0x0) unshare(0x0) unshare(0x0) unshare(0x40060e00) 19:46:48 executing program 0: socketpair$unix(0x1, 0x3, 0x0, &(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) sendmmsg$unix(r1, &(0x7f00000bd000), 0x318, 0x0) (async) ioctl$int_in(r1, 0x5452, &(0x7f0000000000)=0x1) close(r0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000080)=@bridge_dellink={0x6c, 0x11, 0x704, 0x70bd2b, 0x25dfdbfd, {0x7, 0x0, 0x0, 0x0, 0xc820, 0x40}, [@IFLA_EVENT={0x8, 0x2c, 0x1c42}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_EVENT={0x8, 0x2c, 0x3}, @IFLA_CARRIER_CHANGES={0x8, 0x23, 0x6}, @IFLA_BROADCAST={0xa, 0x2, @remote}, @IFLA_PHYS_PORT_ID={0x20, 0x22, "34ad5ef7ed2eb3116fbc0fdbab0516ce5ae1fed050adf60508e14a3e"}]}, 0x6c}, 0x1, 0x0, 0x0, 0x1}, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) (async) r4 = bpf$ITER_CREATE(0x21, &(0x7f00000001c0), 0x8) sendmsg$nl_route_sched(r4, &(0x7f0000000480)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x22582000}, 0xc, &(0x7f0000000440)={&(0x7f0000000280)=@delqdisc={0x1a8, 0x25, 0x4, 0x70bd27, 0x25dfdbfd, {0x0, 0x0, 0x0, 0x0, {0x9, 0x9}, {0x4e58d517c9d9cd, 0xfff1}, {0xffe0, 0xa}}, [@qdisc_kind_options=@q_fq_codel={{0xd}, {0x14, 0x2, [@TCA_FQ_CODEL_TARGET={0x8, 0x1, 0xfffffff7}, @TCA_FQ_CODEL_FLOWS={0x8, 0x5, 0x2}]}}, @TCA_STAB={0x2c, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x5, 0x3, 0x6c, 0x0, 0x0, 0x200, 
0x101, 0x3}}, {0xa, 0x2, [0x7, 0x7, 0x3f]}}]}, @TCA_RATE={0x6, 0x5, {0x81, 0x9}}, @TCA_STAB={0x58, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x40, 0x0, 0xc39, 0x1, 0x3, 0x7, 0xffb, 0x7}}, {0x12, 0x2, [0x1, 0x3, 0x2, 0x9, 0x3ff, 0xcc7, 0x20]}}, {{0x1c, 0x1, {0x0, 0x40, 0x2, 0xffffb07f, 0x1, 0x2, 0xffffff00, 0x1}}, {0x6, 0x2, [0x6cf]}}]}, @TCA_EGRESS_BLOCK={0x8, 0xe, 0x9c}, @TCA_STAB={0x78, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x7, 0x99, 0x400, 0x9, 0x2, 0x6, 0x6, 0x1}}, {0x6, 0x2, [0x3]}}, {{0x1c, 0x1, {0x2, 0x18, 0x2, 0x1357c6bf, 0x1, 0x400, 0x8000, 0x2}}, {0x8, 0x2, [0xee4, 0x9]}}, {{0x1c, 0x1, {0x7, 0x80, 0x1ff, 0x3, 0x1, 0xf7, 0xe5b4, 0x6}}, {0x10, 0x2, [0x0, 0x7, 0x1, 0x0, 0x401, 0x80]}}]}, @TCA_EGRESS_BLOCK={0x8, 0xe, 0x3}, @qdisc_kind_options=@q_dsmark={{0xb}, {0x38, 0x2, [@TCA_DSMARK_INDICES={0x6, 0x1, 0x3a}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x8000}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x1}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x8}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0xffff}, @TCA_DSMARK_SET_TC_INDEX={0x4}]}}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0xfa04}]}, 0x1a8}, 0x1, 0x0, 0x0, 0x8000}, 0x811) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_STOP_AP(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x20, r5, 0x804, 0x70bd28, 0x25dfdbfb, {{}, {@void, @val={0xc, 0x99, {0x9, 0x27}}}}, ["", "", ""]}, 0x20}, 0x1, 0x0, 0x0, 0x20040818}, 0x800) 19:46:48 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x5f8d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2614.122692][T26290] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:46:48 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) socket$inet_udplite(0x2, 0x2, 0x88) [ 2614.308908][T26290] bond1218: entered promiscuous mode [ 2614.315570][T26290] 8021q: adding VLAN 0 to HW filter on device bond1218 [ 2614.489902][T26292] bond1218: (slave bridge1148): making interface the new active one [ 2614.512194][T26292] bridge1148: entered promiscuous mode [ 2614.530031][T26292] bond1218: (slave bridge1148): Enslaving as an active interface with an up link 19:46:48 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3f000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:48 executing program 0: r0 = socket(0x10, 0x2, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r2, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) pipe(&(0x7f0000000600)={0xffffffffffffffff}) connect$pppoe(r3, &(0x7f0000000640)={0x18, 0x0, {0x0, @random="b5bc8aaa3a99", 'ip6gre0\x00'}}, 0x1e) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r4, &(0x7f00000003c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000380)={&(0x7f0000000180)=@bridge_getneigh={0x58, 0x1e, 0x8, 0x70bd2a, 0x25dfdbfd, {0x7, 0x0, 0x0, 0x0, 0x40000, 0x22800}, [@IFLA_PHYS_SWITCH_ID={0x21, 0x24, "e158c648fc38c9f6b1f726e0722ded8abe2eaefa4be5af0d039eaa5453"}, @IFLA_IFALIASn={0x4}, @IFLA_PROTO_DOWN={0x5, 0x27, 0x40}, @IFLA_EVENT={0x8, 0x2c, 0x3f}]}, 0x58}, 0x1, 0x0, 0x0, 0x8014}, 0x4004) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffffff000000", @ANYRES32=r5, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000240)=@newqdisc={0x138, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r5, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_gred={{0x9}, {0x108, 0x2, [@TCA_GRED_STAB={0x104, 0x2, "56ccd1a4ecd5f054a69bca0e4a98373585fa9cdbbda1a43d7c654010009fa58fad645fe6d71f2b993d777c161b81e2cc2ee4bad056d3a0fefcdd632bb41f381119e1b86c70df47bccc561f6b24d012d55bf8b734a55f6579cfe7f5a4a0184ccc062b0adc9dd35812899ad6b07c895085b0e9b3354efc65e50806f75449c9bdc90f4585d316045c2f00542f2cc781b3ad1c792b6f173bffcc2bc00331b7aff3b8b956c346dcce87b0477ff05a8ea65b7bd514d6c5453183e11893b49dbac29da545271b03e601e157acb3ae954ab88cc6e054d613a861a46fd54afaf256bc15b798161946105ff267d5750b6c6576558b5a584c54169d00f8de64c271f563c950"}]}}]}, 0x138}}, 0x0) r6 = 
syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r7 = syz_genetlink_get_family_id$nl802154(&(0x7f0000000440), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f0000000500)={'wpan1\x00', 0x0}) getsockopt$PNPIPE_IFINDEX(r2, 0x113, 0x2, &(0x7f0000000840)=0x0, &(0x7f0000000980)=0x4) sendmsg$nl_route_sched(r0, &(0x7f0000000b00)={&(0x7f0000000800)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000ac0)={&(0x7f00000009c0)=@newqdisc={0x100, 0x24, 0x400, 0x70bd2b, 0x25dfdbff, {0x0, 0x0, 0x0, r9, {0x3, 0xf}, {0x5, 0x10}, {0xc, 0xa}}, [@qdisc_kind_options=@q_dsmark={{0xb}, {0x38, 0x2, [@TCA_DSMARK_INDICES={0x6, 0x1, 0x4}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x11}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x5}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x22}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x5}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x30}]}}, @TCA_RATE={0x6, 0x5, {0x6, 0xe0}}, @TCA_RATE={0x6, 0x5, {0xc9, 0x6}}, @TCA_STAB={0x80, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0xa8, 0x4c, 0x3, 0x6, 0x2, 0x0, 0x3ff, 0x3}}, {0xa, 0x2, [0xb4, 0x4, 0x1]}}, {{0x1c, 0x1, {0x1, 0x1, 0x81, 0x1000, 0x2, 0x3, 0x5, 0x7}}, {0x12, 0x2, [0x3f, 0x7, 0x1f, 0x6, 0x4, 0x5, 0x4]}}, {{0x1c, 0x1, {0x0, 0x0, 0x98, 0x81, 0x0, 0x3f, 0x800, 0x1}}, {0x6, 0x2, [0x0]}}]}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x7}]}, 0x100}, 0x1, 0x0, 0x0, 0x4000040}, 0x40000) syz_genetlink_get_family_id$nl802154(&(0x7f00000001c0), 0xffffffffffffffff) r10 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r11 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_802154(r11, 0x8933, &(0x7f0000000940)={'wpan1\x00', 0x0}) r13 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r14 = syz_genetlink_get_family_id$ieee802154(&(0x7f0000000000), r10) sendmsg$IEEE802154_LLSEC_ADD_DEV(r13, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000200)={&(0x7f00000000c0)={0x50, r14, 0x852dd6c070cd7e4d, 0x0, 0x0, {}, [@IEEE802154_ATTR_LLSEC_FRAME_COUNTER={0x8}, @IEEE802154_ATTR_LLSEC_DEV_OVERRIDE={0x5}, @IEEE802154_ATTR_HW_ADDR={0xc}, @IEEE802154_ATTR_LLSEC_DEV_KEY_MODE={0x5, 0x37, 0x3}, @IEEE802154_ATTR_DEV_INDEX={0x8, 0x2, r12}, @IEEE802154_ATTR_SHORT_ADDR={0x6}, @IEEE802154_ATTR_PAN_ID={0x6}]}, 0x140}, 0x4, 0x700000000000700}, 0x0) sendmsg$IEEE802154_LLSEC_SETPARAMS(0xffffffffffffffff, &(0x7f0000000c40)={&(0x7f0000000b40)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000c00)={&(0x7f0000000b80)={0x5c, r14, 0x200, 0x70bd2d, 0x25dfdbfc, {}, [@IEEE802154_ATTR_SHORT_ADDR={0x6, 0x4, 0xaaa3}, @IEEE802154_ATTR_HW_ADDR={0xc, 0x5, {0xaaaaaaaaaaaa0102}}, @IEEE802154_ATTR_LLSEC_KEY_ID={0x5, 0x2e, 0xfe}, @IEEE802154_ATTR_LLSEC_KEY_MODE={0x5, 0x2b, 0x3}, @IEEE802154_ATTR_LLSEC_FRAME_COUNTER={0x8}, @IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED={0xc}, @IEEE802154_ATTR_LLSEC_SECLEVEL={0x5}, @IEEE802154_ATTR_LLSEC_ENABLED={0x5, 0x29, 0x1}]}, 0x5c}, 0x1, 0x0, 0x0, 0x5}, 0x14000044) r15 = socket$nl_generic(0x10, 0x3, 0x10) r16 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r15, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r15, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r16, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r17, @ANYBLOB], 0x1c}}, 0x0) sendmsg$MPTCP_PM_CMD_SET_LIMITS(r15, &(0x7f0000000780)={&(0x7f0000000680)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000740)={&(0x7f00000006c0)={0x68, 0x0, 0x10, 0x70bd29, 0x25dfdbfe, {}, [@MPTCP_PM_ATTR_ADDR={0xc, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FLAGS={0x8, 0x6, 
0x4}]}, @MPTCP_PM_ATTR_ADDR={0x4}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x3}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x1}, @MPTCP_PM_ATTR_SUBFLOWS={0x8}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x1}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x4}, @MPTCP_PM_ATTR_ADDR={0x14, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FAMILY={0x6, 0x1, 0xa}, @MPTCP_PM_ADDR_ATTR_ADDR4={0x8, 0x3, @empty}]}]}, 0x68}, 0x1, 0x0, 0x0, 0x801}, 0x800) sendmsg$NL802154_CMD_GET_SEC_DEVKEY(r6, &(0x7f00000005c0)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000580)={&(0x7f0000000540)={0x34, r7, 0x0, 0x70bd2b, 0x25dfdbff, {}, [@NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x300000003}, @NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x3}, @NL802154_ATTR_IFINDEX={0x8, 0x3, r8}]}, 0x34}, 0x1, 0x0, 0x0, 0x890}, 0x4004010) 19:46:48 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) socket$inet_udplite(0x2, 0x2, 0x88) 19:46:48 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) socket$inet_udplite(0x2, 0x2, 0x88) [ 2614.608054][T26300] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:46:48 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x40000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:48 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{}, {}, {}, {}, {0x8, 0x22eb, 0x0, {{}, 0x2, {0x20000}}}}}}}}, 0x0) 19:46:49 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{}, {}, {}, {}, {0x8, 0x22eb, 0x0, {{}, 0x2, {0x20000}}}}}}}}, 0x0) [ 2614.667456][T26300] workqueue: Failed to create a rescuer kthread for wq "bond1180": -EINTR [ 2615.028132][T26324] bond1105: entered promiscuous mode [ 2615.055887][T26324] 8021q: adding VLAN 0 to HW filter on device bond1105 19:46:49 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}, {{}, {}, {}, {}, {0x8, 0x22eb, 0x0, {{}, 0x2, {0x20000}}}}}}}}, 0x0) [ 2615.265604][T26327] bond1105: (slave bridge1035): making interface the new 
active one [ 2615.291805][T26327] bridge1035: entered promiscuous mode [ 2615.319125][T26327] bond1105: (slave bridge1035): Enslaving as an active interface with an up link [ 2615.345384][T26331] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2615.516512][T26335] bond1219: entered promiscuous mode [ 2615.524222][T26335] 8021q: adding VLAN 0 to HW filter on device bond1219 [ 2615.580547][T26340] bond1219: (slave bridge1149): making interface the new active one [ 2615.589942][T26340] bridge1149: entered promiscuous mode [ 2615.602495][T26340] bond1219: (slave bridge1149): Enslaving as an active interface with an up link [ 2615.739354][T26349] bond1180: entered promiscuous mode [ 2615.754398][T26349] 8021q: adding VLAN 0 to HW filter on device bond1180 [ 2615.868065][T26351] bond1180: (slave bridge1083): making interface the new active one [ 2615.903391][T26351] bridge1083: entered promiscuous mode [ 2615.943833][T26351] bond1180: (slave bridge1083): Enslaving as an active interface with an up link 19:46:53 executing program 2: r0 = socket$inet6_udp(0xa, 0x2, 0x0) getsockopt$IP_VS_SO_GET_VERSION(r0, 0x0, 0x480, &(0x7f0000000040), &(0x7f0000000080)=0x40) sendmsg$NL80211_CMD_SET_MESH_CONFIG(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000140)=ANY=[@ANYBLOB="07af0000594c0da984d441c19ad78cf0b6e3c433ff0705a644acc56c2aa076b7a4116c1359832b2e27e83d635ef7e0db7f70cbd2a72f12ddb50565152d08e8853b85ae9f7f05712b98a28dc896701d5f2d5dab2d0c2c950933a7df012bebaaa766b95aac7685f8f6628daab45e0bdd28c560486582af0b2f07d2cbac3af4ff58269e0300000000000000fbe486e8a1130cc8d9a3a9257dfd80ee2fa258dca4c911aa2a3c5759f072f45088bcfd964a7e98252ddcc84e5aeca6da842e203e9cc1f3bf744a34ac9d1915ffb183f1c448823cd05e7c740d87e12a9aeac176ceb7fdb28ba2519e2dbff5dce308847974bd2512a39ddfc5828404b1b2e0", @ANYRES32=0x0, @ANYBLOB="020025bd700001dcdf251d00000008000300", @ANYRES16, @ANYBLOB="060099004000000020e83ba54b0001005b00000044002380060003001100fc000800150067000000080017000100000006000400390000e54b0c096d733c25692d820006000d004753000008001c00010000000600c0d3f6beec59a90c0000"], 0x74}, 0x1, 0x0, 0x0, 0x2400086e}, 0x80) unshare(0x44040600) (async) unshare(0x44020200) (async) unshare(0x400) (async) unshare(0x0) unshare(0x0) (async) unshare(0x0) (async) unshare(0x40060e00) 19:46:53 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb01000000000000000200000000000000000000000800655800000000"], 0x0) 19:46:53 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x60000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:53 executing program 0: r0 = 
socket(0x10, 0x2, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 32) r2 = socket(0x10, 0x803, 0x0) (rerun: 32) sendmsg$BATADV_CMD_GET_MESH(r2, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) pipe(&(0x7f0000000600)={0xffffffffffffffff}) connect$pppoe(r3, &(0x7f0000000640)={0x18, 0x0, {0x0, @random="b5bc8aaa3a99", 'ip6gre0\x00'}}, 0x1e) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r4, &(0x7f00000003c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000380)={&(0x7f0000000180)=@bridge_getneigh={0x58, 0x1e, 0x8, 0x70bd2a, 0x25dfdbfd, {0x7, 0x0, 0x0, 0x0, 0x40000, 0x22800}, [@IFLA_PHYS_SWITCH_ID={0x21, 0x24, "e158c648fc38c9f6b1f726e0722ded8abe2eaefa4be5af0d039eaa5453"}, @IFLA_IFALIASn={0x4}, @IFLA_PROTO_DOWN={0x5, 0x27, 0x40}, @IFLA_EVENT={0x8, 0x2c, 0x3f}]}, 0x58}, 0x1, 0x0, 0x0, 0x8014}, 0x4004) (async) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffffff000000", @ANYRES32=r5, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000240)=@newqdisc={0x138, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r5, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_gred={{0x9}, {0x108, 0x2, [@TCA_GRED_STAB={0x104, 0x2, "56ccd1a4ecd5f054a69bca0e4a98373585fa9cdbbda1a43d7c654010009fa58fad645fe6d71f2b993d777c161b81e2cc2ee4bad056d3a0fefcdd632bb41f381119e1b86c70df47bccc561f6b24d012d55bf8b734a55f6579cfe7f5a4a0184ccc062b0adc9dd35812899ad6b07c895085b0e9b3354efc65e50806f75449c9bdc90f4585d316045c2f00542f2cc781b3ad1c792b6f173bffcc2bc00331b7aff3b8b956c346dcce87b0477ff05a8ea65b7bd514d6c5453183e11893b49dbac29da545271b03e601e157acb3ae954ab88cc6e054d613a861a46fd54afaf256bc15b798161946105ff267d5750b6c6576558b5a584c54169d00f8de64c271f563c950"}]}}]}, 0x138}}, 0x0) r6 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r7 = syz_genetlink_get_family_id$nl802154(&(0x7f0000000440), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f0000000500)={'wpan1\x00', 0x0}) (async) getsockopt$PNPIPE_IFINDEX(r2, 0x113, 0x2, &(0x7f0000000840)=0x0, &(0x7f0000000980)=0x4) sendmsg$nl_route_sched(r0, &(0x7f0000000b00)={&(0x7f0000000800)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000ac0)={&(0x7f00000009c0)=@newqdisc={0x100, 0x24, 0x400, 0x70bd2b, 0x25dfdbff, {0x0, 0x0, 0x0, r9, {0x3, 0xf}, {0x5, 0x10}, {0xc, 0xa}}, [@qdisc_kind_options=@q_dsmark={{0xb}, {0x38, 0x2, [@TCA_DSMARK_INDICES={0x6, 0x1, 0x4}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x11}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x5}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x22}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x5}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x30}]}}, @TCA_RATE={0x6, 0x5, {0x6, 0xe0}}, @TCA_RATE={0x6, 0x5, {0xc9, 0x6}}, @TCA_STAB={0x80, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0xa8, 0x4c, 0x3, 0x6, 0x2, 0x0, 0x3ff, 0x3}}, {0xa, 0x2, [0xb4, 0x4, 0x1]}}, {{0x1c, 0x1, {0x1, 0x1, 0x81, 0x1000, 0x2, 0x3, 0x5, 0x7}}, {0x12, 0x2, [0x3f, 0x7, 0x1f, 0x6, 0x4, 0x5, 0x4]}}, {{0x1c, 0x1, {0x0, 0x0, 0x98, 0x81, 0x0, 0x3f, 0x800, 0x1}}, {0x6, 0x2, [0x0]}}]}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x7}]}, 0x100}, 0x1, 0x0, 0x0, 0x4000040}, 0x40000) syz_genetlink_get_family_id$nl802154(&(0x7f00000001c0), 0xffffffffffffffff) (async) r10 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r11 = 
syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_802154(r11, 0x8933, &(0x7f0000000940)={'wpan1\x00', 0x0}) (async, rerun: 64) r13 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async, rerun: 64) r14 = syz_genetlink_get_family_id$ieee802154(&(0x7f0000000000), r10) sendmsg$IEEE802154_LLSEC_ADD_DEV(r13, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000200)={&(0x7f00000000c0)={0x50, r14, 0x852dd6c070cd7e4d, 0x0, 0x0, {}, [@IEEE802154_ATTR_LLSEC_FRAME_COUNTER={0x8}, @IEEE802154_ATTR_LLSEC_DEV_OVERRIDE={0x5}, @IEEE802154_ATTR_HW_ADDR={0xc}, @IEEE802154_ATTR_LLSEC_DEV_KEY_MODE={0x5, 0x37, 0x3}, @IEEE802154_ATTR_DEV_INDEX={0x8, 0x2, r12}, @IEEE802154_ATTR_SHORT_ADDR={0x6}, @IEEE802154_ATTR_PAN_ID={0x6}]}, 0x140}, 0x4, 0x700000000000700}, 0x0) (async, rerun: 32) sendmsg$IEEE802154_LLSEC_SETPARAMS(0xffffffffffffffff, &(0x7f0000000c40)={&(0x7f0000000b40)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000c00)={&(0x7f0000000b80)={0x5c, r14, 0x200, 0x70bd2d, 0x25dfdbfc, {}, [@IEEE802154_ATTR_SHORT_ADDR={0x6, 0x4, 0xaaa3}, @IEEE802154_ATTR_HW_ADDR={0xc, 0x5, {0xaaaaaaaaaaaa0102}}, @IEEE802154_ATTR_LLSEC_KEY_ID={0x5, 0x2e, 0xfe}, @IEEE802154_ATTR_LLSEC_KEY_MODE={0x5, 0x2b, 0x3}, @IEEE802154_ATTR_LLSEC_FRAME_COUNTER={0x8}, @IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED={0xc}, @IEEE802154_ATTR_LLSEC_SECLEVEL={0x5}, @IEEE802154_ATTR_LLSEC_ENABLED={0x5, 0x29, 0x1}]}, 0x5c}, 0x1, 0x0, 0x0, 0x5}, 0x14000044) (rerun: 32) r15 = socket$nl_generic(0x10, 0x3, 0x10) r16 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r15, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r15, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r16, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r17, @ANYBLOB], 0x1c}}, 0x0) (async) sendmsg$MPTCP_PM_CMD_SET_LIMITS(r15, &(0x7f0000000780)={&(0x7f0000000680)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000740)={&(0x7f00000006c0)={0x68, 0x0, 0x10, 0x70bd29, 0x25dfdbfe, {}, [@MPTCP_PM_ATTR_ADDR={0xc, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FLAGS={0x8, 0x6, 0x4}]}, @MPTCP_PM_ATTR_ADDR={0x4}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x3}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x1}, @MPTCP_PM_ATTR_SUBFLOWS={0x8}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x1}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x4}, @MPTCP_PM_ATTR_ADDR={0x14, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FAMILY={0x6, 0x1, 0xa}, @MPTCP_PM_ADDR_ATTR_ADDR4={0x8, 0x3, @empty}]}]}, 0x68}, 0x1, 0x0, 0x0, 0x801}, 0x800) (async) sendmsg$NL802154_CMD_GET_SEC_DEVKEY(r6, &(0x7f00000005c0)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000580)={&(0x7f0000000540)={0x34, r7, 0x0, 0x70bd2b, 0x25dfdbff, {}, [@NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x300000003}, @NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x3}, @NL802154_ATTR_IFINDEX={0x8, 0x3, r8}]}, 0x34}, 0x1, 0x0, 0x0, 0x890}, 0x4004010) 19:46:53 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 
0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x40000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:53 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x42020000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:53 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb01000000000000000200000000000000000000000800655800000000"], 0x0) [ 2619.288968][T26368] validate_nla: 3 callbacks suppressed [ 2619.289018][T26368] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:46:53 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb01000000000000000200000000000000000000000800655800000000"], 0x0) [ 2619.439129][T26368] bond1220: entered promiscuous mode [ 2619.453865][T26368] 8021q: adding VLAN 0 to HW filter on device bond1220 [ 2619.478548][T26367] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:46:53 executing program 3: r0 = socket(0x1, 0x803, 0x0) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x1, 0x803, 0x0) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) sendmsg$nl_route(r2, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r4], 0x3c}}, 0x0) ioctl$sock_ipv6_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f0000000280)={'ip6_vti0\x00', &(0x7f0000000200)={'ip6tnl0\x00', r1, 0x4, 0xff, 0x4d, 0x0, 0x40, @dev={0xfe, 0x80, '\x00', 0x2e}, @private1={0xfc, 0x1, '\x00', 0x1}, 0x10, 0x700, 0x9, 0x4}}) sendmsg$nl_route_sched(r2, &(0x7f0000000680)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)=@newchain={0x34, 0x64, 0x8, 0x70bd26, 0x25dfdbfe, {0x0, 0x0, 0x0, r5, {0x8, 0x6}, {0xe, 0x8}, {0x6}}, [@TCA_CHAIN={0x8, 0xb, 0x5}, @TCA_RATE={0x6, 0x5, {0x1, 0xff}}]}, 0x34}, 0x1, 0x0, 0x0, 0x24002000}, 0x4800) r6 = syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) sendmsg$NL80211_CMD_UPDATE_OWE_INFO(r0, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000140)={&(0x7f0000000300)={0x2cc, r6, 0x300, 0x70bd29, 0x25dfdbfb, {{}, {@void, @val={0xc, 0x99, {0x7, 0x76}}}}, [@NL80211_ATTR_IE={0x1c0, 0x2a, [@channel_switch={0x25, 0x3, {0x1, 0xb4, 0x1}}, @fast_bss_trans={0x37, 0xbf, {0x2, 0x5, "fd6aa4831b7faef65601fb55e17a6ad6", "7092c043457c3fbf6e37afc641848b3aff7e170efbed8d8422f78ece3890eb0a", "3285dbda8634018a0598358287b5431dbfa741f6789b53186b3c3645e9a1cf18", [{0x2, 0x6, "36e9da15dee8"}, {0x4, 0x12, "92a80ef19936257a495c32d511b855a164d3"}, {0x3, 0x20, "0bbafa901cb79fe6da740dac55843b002e54fbcdd8658d454febe0b0decb85ea"}, {0x2, 0x4, "2363d6a2"}, {0x1, 0x27, "accf7089fce980a921f97f141094db9d644fd6dea1298237ed0cda1fdc9674b3dfe71a7e9aeca5"}]}}, @random_vendor={0xdd, 0x43, "769d838147aa60fc07b26d62044b4a4190c4680b32c89d564166e5777ed7fcd0fa9c30c5f76992dba9261239892fcdbf00b3ccb954c28883b33972e0670a210641cf51"}, @measure_req={0x26, 0x74, {0x2, 0x5, 0x8b, "94779d46836b54a2045519e9dccad9fae5e3fc0fc47ada0c1a8b3e4db0437cf9bc927d4f4d5651cd0b1a5b3bbc73881a0f75ec7dbf0b863b1e22819be28d1f86616de1d1f9ad0b557696e1e8b650f3ff2459efca0abec6cfdac6ea0dc6c6a728a5bffa786c4370b2fb5954b1867d0004fa"}}, @random_vendor={0xdd, 0x39, "d1857ba0c1ae9d8b0173717d45a7f22addcf8a7b1bc40451f177fc5b7e47f21f11e44a237d215bccf7e3d7ad4ccf7eaff73e73bfc7d0139713"}]}, @NL80211_ATTR_IE={0xe9, 0x2a, [@supported_rates={0x1, 0x8, [{0x75}, {0x1b}, {0x5}, {0xc, 0x1}, {0x6, 0x1}, {0x16, 0x1}, {0x6}, {0x4f}]}, @mesh_config={0x71, 0x7, {0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x5, 0x48}}, @link_id={0x65, 0x12, {@from_mac=@device_b, @device_b, @device_b}}, @mesh_chsw={0x76, 0x6, {0x1f, 0x0, 0x32, 0x4}}, @mesh_id={0x72, 0x6}, @link_id={0x65, 0x12, {@random="2ff66796c0d6", @device_b, @broadcast}}, @random_vendor={0xdd, 0x90, "00f7903d8c9b5ce9a742d947cf3a0c387c9032404699f30ca7a2c0f14f5cb3e142d0bc28d8c7f093c3e9ce8eb89fed4f56de78b1778497cab2215a7cd433333ea213c29debfda0711798bd40e00462fd388974c1ebc57ee5e7e320a656fd6f954b6d8201111eb6406cc25c7389c1a54e41582ea0b81bc1b77be25def9aeddf6cf38aebd0946f7f7348903ecd05c3c119"}, @mesh_chsw={0x76, 0x6, {0x76, 0x1, 0x39, 0x1}}]}]}, 0x2cc}, 0x1, 0x0, 0x0, 0x4040000}, 0x0) 
getsockopt$inet_sctp_SCTP_FRAGMENT_INTERLEAVE(r0, 0x84, 0x12, &(0x7f0000000000), &(0x7f0000000040)=0x4) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) [ 2619.595877][T26367] bond1181: entered promiscuous mode [ 2619.602361][T26367] 8021q: adding VLAN 0 to HW filter on device bond1181 [ 2619.619259][T26369] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2619.678073][T26369] bond1106: entered promiscuous mode [ 2619.684797][T26369] 8021q: adding VLAN 0 to HW filter on device bond1106 [ 2619.707927][T26373] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2619.786112][T26376] bond1181: (slave bridge1084): making interface the new active one [ 2619.795521][T26376] bridge1084: entered promiscuous mode [ 2619.808901][T26376] bond1181: (slave bridge1084): Enslaving as an active interface with an up link 19:46:53 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x48000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2619.929473][T26378] bond1106: (slave bridge1036): making interface the new active one [ 2619.945195][T26378] bridge1036: entered promiscuous mode [ 2619.958857][T26378] bond1106: (slave bridge1036): Enslaving as an active interface with an up link [ 2619.993765][T26398] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:46:54 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x608d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2620.115465][T26398] bond1182: entered promiscuous mode [ 2620.121168][T26398] 8021q: adding VLAN 0 to HW filter on device bond1182 [ 2620.171365][T26380] bond1220: (slave bridge1150): making interface the new active one [ 2620.179892][T26380] bridge1150: entered promiscuous mode [ 2620.190812][T26380] bond1220: (slave bridge1150): Enslaving as an active interface with an up link 19:46:54 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x40020000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2620.498569][T26400] bond1182: (slave bridge1085): making interface the new active one [ 2620.509312][T26400] bridge1085: entered promiscuous mode [ 2620.525504][T26400] bond1182: (slave bridge1085): Enslaving as an active interface with an up link [ 2620.540770][T26403] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2620.611537][T26403] bond1107: entered promiscuous mode [ 2620.633699][T26403] 8021q: adding VLAN 0 to HW filter on device bond1107 [ 2620.756852][T26404] bond1107: (slave bridge1037): making interface the new active one [ 2620.793670][T26404] bridge1037: entered promiscuous mode [ 2620.805364][T26404] bond1107: (slave bridge1037): Enslaving as an active interface with an up link [ 2620.857588][T26407] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2620.962872][T26407] bond1221: entered promiscuous mode [ 2620.969909][T26407] 8021q: adding VLAN 0 to HW filter on device bond1221 [ 2621.030071][T26408] bond1221: (slave bridge1151): making interface the new active one [ 2621.038488][T26408] bridge1151: entered promiscuous mode [ 2621.050090][T26408] bond1221: (slave bridge1151): Enslaving as an active interface with an up link 19:46:58 executing program 2: socket$inet6_udp(0xa, 0x2, 0x0) (async) r0 = socket$inet6_udp(0xa, 0x2, 0x0) getsockopt$IP_VS_SO_GET_VERSION(r0, 0x0, 0x480, &(0x7f0000000040), &(0x7f0000000080)=0x40) (async) getsockopt$IP_VS_SO_GET_VERSION(r0, 0x0, 0x480, &(0x7f0000000040), &(0x7f0000000080)=0x40) sendmsg$NL80211_CMD_SET_MESH_CONFIG(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000140)=ANY=[@ANYBLOB="07af0000594c0da984d441c19ad78cf0b6e3c433ff0705a644acc56c2aa076b7a4116c1359832b2e27e83d635ef7e0db7f70cbd2a72f12ddb50565152d08e8853b85ae9f7f05712b98a28dc896701d5f2d5dab2d0c2c950933a7df012bebaaa766b95aac7685f8f6628daab45e0bdd28c560486582af0b2f07d2cbac3af4ff58269e0300000000000000fbe486e8a1130cc8d9a3a9257dfd80ee2fa258dca4c911aa2a3c5759f072f45088bcfd964a7e98252ddcc84e5aeca6da842e203e9cc1f3bf744a34ac9d1915ffb183f1c448823cd05e7c740d87e12a9aeac176ceb7fdb28ba2519e2dbff5dce308847974bd2512a39ddfc5828404b1b2e0", @ANYRES32=0x0, @ANYBLOB="020025bd700001dcdf251d00000008000300", @ANYRES16, @ANYBLOB="060099004000000020e83ba54b0001005b00000044002380060003001100fc000800150067000000080017000100000006000400390000e54b0c096d733c25692d820006000d004753000008001c00010000000600c0d3f6beec59a90c0000"], 0x74}, 0x1, 0x0, 0x0, 0x2400086e}, 0x80) unshare(0x44040600) unshare(0x44020200) unshare(0x400) unshare(0x0) unshare(0x0) unshare(0x0) (async) unshare(0x0) unshare(0x40060e00) 19:46:58 executing program 0: r0 = socket(0x10, 0x2, 0x0) (async) r1 = socket$nl_route(0x10, 0x3, 0x0) (async) r2 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r2, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) pipe(&(0x7f0000000600)={0xffffffffffffffff}) connect$pppoe(r3, &(0x7f0000000640)={0x18, 0x0, {0x0, @random="b5bc8aaa3a99", 'ip6gre0\x00'}}, 0x1e) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r4, &(0x7f00000003c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000380)={&(0x7f0000000180)=@bridge_getneigh={0x58, 0x1e, 0x8, 0x70bd2a, 0x25dfdbfd, {0x7, 0x0, 0x0, 0x0, 0x40000, 0x22800}, [@IFLA_PHYS_SWITCH_ID={0x21, 0x24, "e158c648fc38c9f6b1f726e0722ded8abe2eaefa4be5af0d039eaa5453"}, @IFLA_IFALIASn={0x4}, @IFLA_PROTO_DOWN={0x5, 0x27, 0x40}, @IFLA_EVENT={0x8, 0x2c, 0x3f}]}, 0x58}, 0x1, 0x0, 0x0, 0x8014}, 0x4004) (async) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffffff000000", @ANYRES32=r5, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000240)=@newqdisc={0x138, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r5, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_gred={{0x9}, {0x108, 0x2, [@TCA_GRED_STAB={0x104, 0x2, 
"56ccd1a4ecd5f054a69bca0e4a98373585fa9cdbbda1a43d7c654010009fa58fad645fe6d71f2b993d777c161b81e2cc2ee4bad056d3a0fefcdd632bb41f381119e1b86c70df47bccc561f6b24d012d55bf8b734a55f6579cfe7f5a4a0184ccc062b0adc9dd35812899ad6b07c895085b0e9b3354efc65e50806f75449c9bdc90f4585d316045c2f00542f2cc781b3ad1c792b6f173bffcc2bc00331b7aff3b8b956c346dcce87b0477ff05a8ea65b7bd514d6c5453183e11893b49dbac29da545271b03e601e157acb3ae954ab88cc6e054d613a861a46fd54afaf256bc15b798161946105ff267d5750b6c6576558b5a584c54169d00f8de64c271f563c950"}]}}]}, 0x138}}, 0x0) r6 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r7 = syz_genetlink_get_family_id$nl802154(&(0x7f0000000440), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f0000000500)={'wpan1\x00', 0x0}) getsockopt$PNPIPE_IFINDEX(r2, 0x113, 0x2, &(0x7f0000000840)=0x0, &(0x7f0000000980)=0x4) sendmsg$nl_route_sched(r0, &(0x7f0000000b00)={&(0x7f0000000800)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000ac0)={&(0x7f00000009c0)=@newqdisc={0x100, 0x24, 0x400, 0x70bd2b, 0x25dfdbff, {0x0, 0x0, 0x0, r9, {0x3, 0xf}, {0x5, 0x10}, {0xc, 0xa}}, [@qdisc_kind_options=@q_dsmark={{0xb}, {0x38, 0x2, [@TCA_DSMARK_INDICES={0x6, 0x1, 0x4}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x11}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x5}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x22}, @TCA_DSMARK_DEFAULT_INDEX={0x6, 0x2, 0x5}, @TCA_DSMARK_SET_TC_INDEX={0x4}, @TCA_DSMARK_INDICES={0x6, 0x1, 0x30}]}}, @TCA_RATE={0x6, 0x5, {0x6, 0xe0}}, @TCA_RATE={0x6, 0x5, {0xc9, 0x6}}, @TCA_STAB={0x80, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0xa8, 0x4c, 0x3, 0x6, 0x2, 0x0, 0x3ff, 0x3}}, {0xa, 0x2, [0xb4, 0x4, 0x1]}}, {{0x1c, 0x1, {0x1, 0x1, 0x81, 0x1000, 0x2, 0x3, 0x5, 0x7}}, {0x12, 0x2, [0x3f, 0x7, 0x1f, 0x6, 0x4, 0x5, 0x4]}}, {{0x1c, 0x1, {0x0, 0x0, 0x98, 0x81, 0x0, 0x3f, 0x800, 0x1}}, {0x6, 0x2, [0x0]}}]}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x7}]}, 0x100}, 0x1, 0x0, 0x0, 0x4000040}, 0x40000) (async) syz_genetlink_get_family_id$nl802154(&(0x7f00000001c0), 0xffffffffffffffff) (async) r10 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r11 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_802154(r11, 0x8933, &(0x7f0000000940)={'wpan1\x00', 0x0}) (async) r13 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r14 = syz_genetlink_get_family_id$ieee802154(&(0x7f0000000000), r10) sendmsg$IEEE802154_LLSEC_ADD_DEV(r13, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000200)={&(0x7f00000000c0)={0x50, r14, 0x852dd6c070cd7e4d, 0x0, 0x0, {}, [@IEEE802154_ATTR_LLSEC_FRAME_COUNTER={0x8}, @IEEE802154_ATTR_LLSEC_DEV_OVERRIDE={0x5}, @IEEE802154_ATTR_HW_ADDR={0xc}, @IEEE802154_ATTR_LLSEC_DEV_KEY_MODE={0x5, 0x37, 0x3}, @IEEE802154_ATTR_DEV_INDEX={0x8, 0x2, r12}, @IEEE802154_ATTR_SHORT_ADDR={0x6}, @IEEE802154_ATTR_PAN_ID={0x6}]}, 0x140}, 0x4, 0x700000000000700}, 0x0) sendmsg$IEEE802154_LLSEC_SETPARAMS(0xffffffffffffffff, &(0x7f0000000c40)={&(0x7f0000000b40)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000c00)={&(0x7f0000000b80)={0x5c, r14, 0x200, 0x70bd2d, 0x25dfdbfc, {}, [@IEEE802154_ATTR_SHORT_ADDR={0x6, 0x4, 0xaaa3}, @IEEE802154_ATTR_HW_ADDR={0xc, 0x5, {0xaaaaaaaaaaaa0102}}, @IEEE802154_ATTR_LLSEC_KEY_ID={0x5, 0x2e, 0xfe}, @IEEE802154_ATTR_LLSEC_KEY_MODE={0x5, 0x2b, 0x3}, @IEEE802154_ATTR_LLSEC_FRAME_COUNTER={0x8}, @IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED={0xc}, @IEEE802154_ATTR_LLSEC_SECLEVEL={0x5}, @IEEE802154_ATTR_LLSEC_ENABLED={0x5, 0x29, 0x1}]}, 0x5c}, 0x1, 0x0, 0x0, 0x5}, 0x14000044) (async) r15 = socket$nl_generic(0x10, 0x3, 0x10) (async) r16 = 
syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r15, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r15, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r16, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r17, @ANYBLOB], 0x1c}}, 0x0) sendmsg$MPTCP_PM_CMD_SET_LIMITS(r15, &(0x7f0000000780)={&(0x7f0000000680)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000740)={&(0x7f00000006c0)={0x68, 0x0, 0x10, 0x70bd29, 0x25dfdbfe, {}, [@MPTCP_PM_ATTR_ADDR={0xc, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FLAGS={0x8, 0x6, 0x4}]}, @MPTCP_PM_ATTR_ADDR={0x4}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x3}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x1}, @MPTCP_PM_ATTR_SUBFLOWS={0x8}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x1}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x4}, @MPTCP_PM_ATTR_ADDR={0x14, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FAMILY={0x6, 0x1, 0xa}, @MPTCP_PM_ADDR_ATTR_ADDR4={0x8, 0x3, @empty}]}]}, 0x68}, 0x1, 0x0, 0x0, 0x801}, 0x800) sendmsg$NL802154_CMD_GET_SEC_DEVKEY(r6, &(0x7f00000005c0)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000580)={&(0x7f0000000540)={0x34, r7, 0x0, 0x70bd2b, 0x25dfdbff, {}, [@NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x300000003}, @NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x3}, @NL802154_ATTR_IFINDEX={0x8, 0x3, r8}]}, 0x34}, 0x1, 0x0, 0x0, 0x890}, 0x4004010) 19:46:58 executing program 3: r0 = socket(0x1, 0x803, 0x0) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r2 = socket$nl_route(0x10, 0x3, 0x0) socket(0x1, 0x803, 0x0) (async) r3 = socket(0x1, 0x803, 0x0) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) sendmsg$nl_route(r2, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r4], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r4], 0x3c}}, 0x0) ioctl$sock_ipv6_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f0000000280)={'ip6_vti0\x00', &(0x7f0000000200)={'ip6tnl0\x00', r1, 0x4, 0xff, 0x4d, 0x0, 0x40, @dev={0xfe, 0x80, '\x00', 0x2e}, @private1={0xfc, 0x1, '\x00', 0x1}, 0x10, 0x700, 0x9, 0x4}}) sendmsg$nl_route_sched(r2, &(0x7f0000000680)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)=@newchain={0x34, 0x64, 0x8, 0x70bd26, 0x25dfdbfe, {0x0, 0x0, 0x0, r5, {0x8, 0x6}, {0xe, 0x8}, {0x6}}, [@TCA_CHAIN={0x8, 0xb, 0x5}, @TCA_RATE={0x6, 0x5, {0x1, 0xff}}]}, 0x34}, 0x1, 0x0, 0x0, 0x24002000}, 0x4800) (async) sendmsg$nl_route_sched(r2, &(0x7f0000000680)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)=@newchain={0x34, 0x64, 0x8, 0x70bd26, 0x25dfdbfe, {0x0, 0x0, 0x0, r5, {0x8, 0x6}, {0xe, 0x8}, {0x6}}, [@TCA_CHAIN={0x8, 0xb, 0x5}, @TCA_RATE={0x6, 0x5, {0x1, 0xff}}]}, 0x34}, 0x1, 0x0, 0x0, 0x24002000}, 0x4800) r6 = syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) sendmsg$NL80211_CMD_UPDATE_OWE_INFO(r0, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000140)={&(0x7f0000000300)={0x2cc, r6, 0x300, 0x70bd29, 0x25dfdbfb, {{}, {@void, @val={0xc, 0x99, {0x7, 
0x76}}}}, [@NL80211_ATTR_IE={0x1c0, 0x2a, [@channel_switch={0x25, 0x3, {0x1, 0xb4, 0x1}}, @fast_bss_trans={0x37, 0xbf, {0x2, 0x5, "fd6aa4831b7faef65601fb55e17a6ad6", "7092c043457c3fbf6e37afc641848b3aff7e170efbed8d8422f78ece3890eb0a", "3285dbda8634018a0598358287b5431dbfa741f6789b53186b3c3645e9a1cf18", [{0x2, 0x6, "36e9da15dee8"}, {0x4, 0x12, "92a80ef19936257a495c32d511b855a164d3"}, {0x3, 0x20, "0bbafa901cb79fe6da740dac55843b002e54fbcdd8658d454febe0b0decb85ea"}, {0x2, 0x4, "2363d6a2"}, {0x1, 0x27, "accf7089fce980a921f97f141094db9d644fd6dea1298237ed0cda1fdc9674b3dfe71a7e9aeca5"}]}}, @random_vendor={0xdd, 0x43, "769d838147aa60fc07b26d62044b4a4190c4680b32c89d564166e5777ed7fcd0fa9c30c5f76992dba9261239892fcdbf00b3ccb954c28883b33972e0670a210641cf51"}, @measure_req={0x26, 0x74, {0x2, 0x5, 0x8b, "94779d46836b54a2045519e9dccad9fae5e3fc0fc47ada0c1a8b3e4db0437cf9bc927d4f4d5651cd0b1a5b3bbc73881a0f75ec7dbf0b863b1e22819be28d1f86616de1d1f9ad0b557696e1e8b650f3ff2459efca0abec6cfdac6ea0dc6c6a728a5bffa786c4370b2fb5954b1867d0004fa"}}, @random_vendor={0xdd, 0x39, "d1857ba0c1ae9d8b0173717d45a7f22addcf8a7b1bc40451f177fc5b7e47f21f11e44a237d215bccf7e3d7ad4ccf7eaff73e73bfc7d0139713"}]}, @NL80211_ATTR_IE={0xe9, 0x2a, [@supported_rates={0x1, 0x8, [{0x75}, {0x1b}, {0x5}, {0xc, 0x1}, {0x6, 0x1}, {0x16, 0x1}, {0x6}, {0x4f}]}, @mesh_config={0x71, 0x7, {0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x5, 0x48}}, @link_id={0x65, 0x12, {@from_mac=@device_b, @device_b, @device_b}}, @mesh_chsw={0x76, 0x6, {0x1f, 0x0, 0x32, 0x4}}, @mesh_id={0x72, 0x6}, @link_id={0x65, 0x12, {@random="2ff66796c0d6", @device_b, @broadcast}}, @random_vendor={0xdd, 0x90, "00f7903d8c9b5ce9a742d947cf3a0c387c9032404699f30ca7a2c0f14f5cb3e142d0bc28d8c7f093c3e9ce8eb89fed4f56de78b1778497cab2215a7cd433333ea213c29debfda0711798bd40e00462fd388974c1ebc57ee5e7e320a656fd6f954b6d8201111eb6406cc25c7389c1a54e41582ea0b81bc1b77be25def9aeddf6cf38aebd0946f7f7348903ecd05c3c119"}, @mesh_chsw={0x76, 0x6, {0x76, 0x1, 0x39, 0x1}}]}]}, 0x2cc}, 0x1, 0x0, 0x0, 0x4040000}, 0x0) getsockopt$inet_sctp_SCTP_FRAGMENT_INTERLEAVE(r0, 0x84, 0x12, &(0x7f0000000000), &(0x7f0000000040)=0x4) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:46:58 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x618d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:58 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = 
socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x48030000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:58 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x48000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2624.714245][T26420] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2624.885149][T26420] bond1183: entered promiscuous mode [ 2624.906265][T26420] 8021q: adding VLAN 0 to HW filter on device bond1183 [ 2624.944725][T26419] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2624.987290][T26424] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2625.073455][T26424] bond1108: entered promiscuous mode [ 2625.079792][T26424] 8021q: adding VLAN 0 to HW filter on device bond1108 [ 2625.099857][T26423] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2625.169279][T26423] bond1222: entered promiscuous mode [ 2625.175819][T26423] 8021q: adding VLAN 0 to HW filter on device bond1222 19:46:59 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x608d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:46:59 executing program 3: r0 = socket(0x1, 0x803, 0x0) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x1, 0x803, 0x0) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) sendmsg$nl_route(r2, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r4], 0x3c}}, 0x0) ioctl$sock_ipv6_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f0000000280)={'ip6_vti0\x00', &(0x7f0000000200)={'ip6tnl0\x00', r1, 0x4, 0xff, 0x4d, 0x0, 0x40, @dev={0xfe, 0x80, '\x00', 0x2e}, @private1={0xfc, 0x1, '\x00', 0x1}, 0x10, 0x700, 0x9, 0x4}}) sendmsg$nl_route_sched(r2, &(0x7f0000000680)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)=@newchain={0x34, 0x64, 0x8, 0x70bd26, 0x25dfdbfe, {0x0, 0x0, 0x0, r5, {0x8, 0x6}, {0xe, 0x8}, {0x6}}, [@TCA_CHAIN={0x8, 0xb, 0x5}, @TCA_RATE={0x6, 0x5, {0x1, 0xff}}]}, 0x34}, 0x1, 0x0, 0x0, 0x24002000}, 0x4800) r6 = syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) sendmsg$NL80211_CMD_UPDATE_OWE_INFO(r0, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000140)={&(0x7f0000000300)={0x2cc, r6, 0x300, 0x70bd29, 0x25dfdbfb, {{}, {@void, @val={0xc, 0x99, {0x7, 0x76}}}}, [@NL80211_ATTR_IE={0x1c0, 0x2a, [@channel_switch={0x25, 0x3, {0x1, 0xb4, 0x1}}, @fast_bss_trans={0x37, 0xbf, {0x2, 0x5, "fd6aa4831b7faef65601fb55e17a6ad6", "7092c043457c3fbf6e37afc641848b3aff7e170efbed8d8422f78ece3890eb0a", "3285dbda8634018a0598358287b5431dbfa741f6789b53186b3c3645e9a1cf18", [{0x2, 0x6, "36e9da15dee8"}, {0x4, 0x12, "92a80ef19936257a495c32d511b855a164d3"}, {0x3, 0x20, "0bbafa901cb79fe6da740dac55843b002e54fbcdd8658d454febe0b0decb85ea"}, {0x2, 0x4, "2363d6a2"}, {0x1, 0x27, "accf7089fce980a921f97f141094db9d644fd6dea1298237ed0cda1fdc9674b3dfe71a7e9aeca5"}]}}, @random_vendor={0xdd, 0x43, "769d838147aa60fc07b26d62044b4a4190c4680b32c89d564166e5777ed7fcd0fa9c30c5f76992dba9261239892fcdbf00b3ccb954c28883b33972e0670a210641cf51"}, @measure_req={0x26, 0x74, {0x2, 0x5, 0x8b, "94779d46836b54a2045519e9dccad9fae5e3fc0fc47ada0c1a8b3e4db0437cf9bc927d4f4d5651cd0b1a5b3bbc73881a0f75ec7dbf0b863b1e22819be28d1f86616de1d1f9ad0b557696e1e8b650f3ff2459efca0abec6cfdac6ea0dc6c6a728a5bffa786c4370b2fb5954b1867d0004fa"}}, @random_vendor={0xdd, 0x39, 
"d1857ba0c1ae9d8b0173717d45a7f22addcf8a7b1bc40451f177fc5b7e47f21f11e44a237d215bccf7e3d7ad4ccf7eaff73e73bfc7d0139713"}]}, @NL80211_ATTR_IE={0xe9, 0x2a, [@supported_rates={0x1, 0x8, [{0x75}, {0x1b}, {0x5}, {0xc, 0x1}, {0x6, 0x1}, {0x16, 0x1}, {0x6}, {0x4f}]}, @mesh_config={0x71, 0x7, {0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x5, 0x48}}, @link_id={0x65, 0x12, {@from_mac=@device_b, @device_b, @device_b}}, @mesh_chsw={0x76, 0x6, {0x1f, 0x0, 0x32, 0x4}}, @mesh_id={0x72, 0x6}, @link_id={0x65, 0x12, {@random="2ff66796c0d6", @device_b, @broadcast}}, @random_vendor={0xdd, 0x90, "00f7903d8c9b5ce9a742d947cf3a0c387c9032404699f30ca7a2c0f14f5cb3e142d0bc28d8c7f093c3e9ce8eb89fed4f56de78b1778497cab2215a7cd433333ea213c29debfda0711798bd40e00462fd388974c1ebc57ee5e7e320a656fd6f954b6d8201111eb6406cc25c7389c1a54e41582ea0b81bc1b77be25def9aeddf6cf38aebd0946f7f7348903ecd05c3c119"}, @mesh_chsw={0x76, 0x6, {0x76, 0x1, 0x39, 0x1}}]}]}, 0x2cc}, 0x1, 0x0, 0x0, 0x4040000}, 0x0) getsockopt$inet_sctp_SCTP_FRAGMENT_INTERLEAVE(r0, 0x84, 0x12, &(0x7f0000000000), &(0x7f0000000040)=0x4) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) socket(0x1, 0x803, 0x0) (async) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x1, 0x803, 0x0) (async) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) (async) sendmsg$nl_route(r2, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r4], 0x3c}}, 0x0) (async) ioctl$sock_ipv6_tunnel_SIOCDELTUNNEL(r0, 0x89f2, &(0x7f0000000280)={'ip6_vti0\x00', &(0x7f0000000200)={'ip6tnl0\x00', r1, 0x4, 0xff, 0x4d, 0x0, 0x40, @dev={0xfe, 0x80, '\x00', 0x2e}, @private1={0xfc, 0x1, '\x00', 0x1}, 0x10, 0x700, 0x9, 0x4}}) (async) sendmsg$nl_route_sched(r2, &(0x7f0000000680)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)=@newchain={0x34, 0x64, 0x8, 0x70bd26, 0x25dfdbfe, {0x0, 0x0, 0x0, r5, {0x8, 0x6}, {0xe, 0x8}, {0x6}}, [@TCA_CHAIN={0x8, 0xb, 0x5}, @TCA_RATE={0x6, 0x5, {0x1, 0xff}}]}, 0x34}, 0x1, 0x0, 0x0, 0x24002000}, 0x4800) (async) syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) (async) sendmsg$NL80211_CMD_UPDATE_OWE_INFO(r0, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000140)={&(0x7f0000000300)={0x2cc, r6, 0x300, 0x70bd29, 0x25dfdbfb, {{}, {@void, @val={0xc, 0x99, {0x7, 0x76}}}}, [@NL80211_ATTR_IE={0x1c0, 0x2a, [@channel_switch={0x25, 0x3, {0x1, 0xb4, 0x1}}, @fast_bss_trans={0x37, 0xbf, {0x2, 0x5, "fd6aa4831b7faef65601fb55e17a6ad6", "7092c043457c3fbf6e37afc641848b3aff7e170efbed8d8422f78ece3890eb0a", "3285dbda8634018a0598358287b5431dbfa741f6789b53186b3c3645e9a1cf18", [{0x2, 0x6, "36e9da15dee8"}, {0x4, 0x12, "92a80ef19936257a495c32d511b855a164d3"}, {0x3, 0x20, "0bbafa901cb79fe6da740dac55843b002e54fbcdd8658d454febe0b0decb85ea"}, {0x2, 0x4, "2363d6a2"}, {0x1, 0x27, "accf7089fce980a921f97f141094db9d644fd6dea1298237ed0cda1fdc9674b3dfe71a7e9aeca5"}]}}, @random_vendor={0xdd, 0x43, "769d838147aa60fc07b26d62044b4a4190c4680b32c89d564166e5777ed7fcd0fa9c30c5f76992dba9261239892fcdbf00b3ccb954c28883b33972e0670a210641cf51"}, 
@measure_req={0x26, 0x74, {0x2, 0x5, 0x8b, "94779d46836b54a2045519e9dccad9fae5e3fc0fc47ada0c1a8b3e4db0437cf9bc927d4f4d5651cd0b1a5b3bbc73881a0f75ec7dbf0b863b1e22819be28d1f86616de1d1f9ad0b557696e1e8b650f3ff2459efca0abec6cfdac6ea0dc6c6a728a5bffa786c4370b2fb5954b1867d0004fa"}}, @random_vendor={0xdd, 0x39, "d1857ba0c1ae9d8b0173717d45a7f22addcf8a7b1bc40451f177fc5b7e47f21f11e44a237d215bccf7e3d7ad4ccf7eaff73e73bfc7d0139713"}]}, @NL80211_ATTR_IE={0xe9, 0x2a, [@supported_rates={0x1, 0x8, [{0x75}, {0x1b}, {0x5}, {0xc, 0x1}, {0x6, 0x1}, {0x16, 0x1}, {0x6}, {0x4f}]}, @mesh_config={0x71, 0x7, {0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x5, 0x48}}, @link_id={0x65, 0x12, {@from_mac=@device_b, @device_b, @device_b}}, @mesh_chsw={0x76, 0x6, {0x1f, 0x0, 0x32, 0x4}}, @mesh_id={0x72, 0x6}, @link_id={0x65, 0x12, {@random="2ff66796c0d6", @device_b, @broadcast}}, @random_vendor={0xdd, 0x90, "00f7903d8c9b5ce9a742d947cf3a0c387c9032404699f30ca7a2c0f14f5cb3e142d0bc28d8c7f093c3e9ce8eb89fed4f56de78b1778497cab2215a7cd433333ea213c29debfda0711798bd40e00462fd388974c1ebc57ee5e7e320a656fd6f954b6d8201111eb6406cc25c7389c1a54e41582ea0b81bc1b77be25def9aeddf6cf38aebd0946f7f7348903ecd05c3c119"}, @mesh_chsw={0x76, 0x6, {0x76, 0x1, 0x39, 0x1}}]}]}, 0x2cc}, 0x1, 0x0, 0x0, 0x4040000}, 0x0) (async) getsockopt$inet_sctp_SCTP_FRAGMENT_INTERLEAVE(r0, 0x84, 0x12, &(0x7f0000000000), &(0x7f0000000040)=0x4) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) [ 2625.368109][T26431] bond1108: (slave bridge1038): making interface the new active one [ 2625.377975][T26431] bridge1038: entered promiscuous mode [ 2625.395544][T26431] bond1108: (slave bridge1038): Enslaving as an active interface with an up link 19:46:59 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x628d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2625.635484][T26432] bond1183: (slave bridge1086): making interface the new active one [ 2625.643840][T26432] bridge1086: entered promiscuous mode [ 2625.668482][T26432] bond1183: (slave bridge1086): Enslaving as an active interface with an up link 19:46:59 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4a000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2625.809417][T26433] bond1222: (slave bridge1152): making interface the new active one [ 2625.843694][T26433] bridge1152: entered promiscuous mode [ 2625.862587][T26433] bond1222: (slave bridge1152): Enslaving as an active interface with an up link 19:46:59 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4a000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2625.938061][T26444] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 2626.029125][T26444] bond237: entered promiscuous mode 19:47:00 executing program 3: syz_emit_ethernet(0x4a, &(0x7f0000000800)={@link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0xe}, @random="8afc14827e90", @void, {@mpls_uc={0x8847, {[{0x8}, {0x59, 0x0, 0x1}, {0x9, 0x0, 0x1}, {0x20, 0x0, 0x1}], @generic="86066e8ae67def4c2ecf47864b2ebf948c35a94599d3c13b7f5a59e4fae14a475b1865661b6108a8b233c86e"}}}}, 0x0) [ 2626.054191][T26444] 8021q: adding VLAN 0 to HW filter on device bond237 [ 2626.078064][T26455] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2626.271914][T26455] bond1109: entered promiscuous mode [ 2626.291759][T26455] 8021q: adding VLAN 0 to HW filter on device bond1109 [ 2626.436173][T26456] bond1109: (slave bridge1039): making interface the new active one [ 2626.445981][T26456] bridge1039: entered promiscuous mode [ 2626.458386][T26456] bond1109: (slave bridge1039): Enslaving as an active interface with an up link [ 2626.468049][T26458] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2626.539293][T26458] bond1184: entered promiscuous mode [ 2626.545746][T26458] 8021q: adding VLAN 0 to HW filter on device bond1184 [ 2626.618249][T26459] bond1184: (slave bridge1087): making interface the new active one [ 2626.627780][T26459] bridge1087: entered promiscuous mode [ 2626.646793][T26459] bond1184: (slave bridge1087): Enslaving as an active interface with an up link [ 2626.685560][T26462] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2626.746628][T26462] bond1223: entered promiscuous mode [ 2626.752573][T26462] 8021q: adding VLAN 0 to HW filter on device bond1223 [ 2626.812033][T26463] bond1223: (slave bridge1153): making interface the new active one [ 2626.820429][T26463] bridge1153: entered promiscuous mode [ 2626.832334][T26463] bond1223: (slave bridge1153): Enslaving as an active interface with an up link 19:47:04 executing program 3: syz_emit_ethernet(0x4a, &(0x7f0000000800)={@link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0xe}, @random="8afc14827e90", @void, {@mpls_uc={0x8847, {[{0x8}, {0x59, 0x0, 0x1}, {0x9, 0x0, 0x1}, {0x20, 0x0, 0x1}], @generic="86066e8ae67def4c2ecf47864b2ebf948c35a94599d3c13b7f5a59e4fae14a475b1865661b6108a8b233c86e"}}}}, 0x0) 19:47:04 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000140)=ANY=[], 0x208e24b) bpf$OBJ_GET_MAP(0x7, &(0x7f0000000140)={&(0x7f00000000c0)='./file0\x00', 0x0, 0x8}, 0x10) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000180)={0xffffffffffffffff}) mmap(&(0x7f0000ffd000/0x1000)=nil, 0x1000, 0x2000008, 0x11, r1, 0xc203e000) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r4, &(0x7f00000004c0)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0xce7b1dd20e6948d1}, 0xc, &(0x7f0000000480)={&(0x7f0000000500)=ANY=[@ANYBLOB="30000000190000012dbd7000fedbdf25021420093100ff0b0004000005001b00110000000c00168005000900000000005bccc6135be8f21a8cb71fe89298c143084a1f9f95914902d11d5e952bf49a9f105dc461eeb79144ca15854466c023f2635e3fe92528e54cb52571d67a72489802d699f38373f50b79f98504bc914cd0236ee3855de05d1efcfd1c748efa129da88362ac9ae914978ad28feb81d28dee59ed44e534b11ebf16671f2fd0b38612ada32751689e9f13570044c1c9a0a0873ab5715634da4f4887cc4846c139e4a11317ea38fe206b082d035225bcb0033a77db5d2f64fc6864dae1d327d11afc2f25"], 0x30}, 0x1, 0x0, 0x0, 0x20000000}, 0x200480c4) r5 = syz_genetlink_get_family_id$ethtool(&(0x7f00000002c0), 0xffffffffffffffff) r6 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000240), r0) ioctl$ifreq_SIOCGIFINDEX_vcan(r3, 0x8933, &(0x7f0000000280)={'vxcan0\x00', 0x0}) sendmsg$ETHTOOL_MSG_TSINFO_GET(r0, &(0x7f00000003c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000380)={&(0x7f0000000300)={0x78, r6, 0x1, 0x70bd26, 0x25dfdbff, {}, [@HEADER={0x64, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x2}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x3}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r7}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'syzkaller0\x00'}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x3}, @ETHTOOL_A_HEADER_FLAGS={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'vcan0\x00'}, @ETHTOOL_A_HEADER_FLAGS={0x8}]}]}, 0x78}, 0x1, 0x0, 0x0, 0x20000800}, 0x4880) sendmsg$ETHTOOL_MSG_COALESCE_SET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000000)={0x1c, r5, 0x1, 0x70bd2c, 0x0, {}, [@ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8, 0x4, 0x8}]}, 0x1c}}, 0x0) sendfile(r3, r2, 0x0, 0xffffffff) 19:47:04 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x638d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:04 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4c000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:04 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4c000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:04 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000001500)=ANY=[@ANYRES64=r0, @ANYRES32=r0], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x200000b, 0x28011, r0, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r2 = socket$alg(0x26, 0x5, 0x0) bind$alg(r2, &(0x7f00000000c0)={0x26, 'skcipher\x00', 0x0, 0x0, 'cbc(des3_ede)\x00'}, 0x58) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000000)="8db4c6d3916872c4d26e8e39f30e9ce9ab2f204389cf53c6", 0x18) r3 = accept$alg(r2, 0x0, 0x0) r4 = socket$nl_generic(0x10, 0x3, 0x10) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$EXT4_IOC_GROUP_ADD(0xffffffffffffffff, 0x40286608, &(0x7f00000014c0)={0xfffe0000, 0x4, 0x9e, 0x5, 0x3, 0x9}) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendfile(r3, r4, 0x0, 0x10000a006) accept4$ax25(r1, &(0x7f0000000140)={{0x3, @bcast}, [@netrom, @bcast, @null, @bcast, @bcast, @rose, @null, @bcast]}, &(0x7f0000000040)=0x48, 0x0) r7 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 
&(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000000000000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) write$binfmt_script(0xffffffffffffffff, &(0x7f00000015c0)={'#! ', './file0', [{0x20, 'vlan0\x00'}, {0x20, ')-\\!\\[\''}, {0x20, '\x00'}], 0xa, "5ac74974718eb47c9c547d7e45b9d93d2ba5231317a0f21eb4c14c47c1e19ee3512cc2e116ef6917d53aec77fcbeff9c76c0dd7357414ab759f0a993db9ce5920d560979cdd009"}, 0x63) r8 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000380)='rcu_utilization\x00', r7}, 0x10) sendfile(r0, r3, &(0x7f0000000280)=0x4, 0x5) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(0xffffffffffffffff, 0x8982, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000001480)={&(0x7f00000002c0)='vnet_skip_tx_trigger\x00', r1}, 0x10) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r8, 0x81f8943c, 0x0) ioctl$BTRFS_IOC_TREE_SEARCH(0xffffffffffffffff, 0xd0009411, &(0x7f0000000300)={{0x0, 0x200, 0x1000, 0x3, 0x1, 0x8, 0x0, 0x3, 0x0, 0x40000000, 0x100, 0x1, 0x3, 0xcc, 0x4cd}}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r7, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de
49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e
6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) r10 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r10, &(0x7f0000001400)={0x0, 0x0, &(0x7f0000001440)={0x0}, 0x1, 0x0, 0x0, 0x2400c800}, 0x4004050) getsockname$packet(0xffffffffffffffff, &(0x7f0000001640)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000001540)=0x14) setsockopt$ALG_SET_KEY(r10, 0x117, 0x1, &(0x7f0000001300)="e0d85939552deb5ddd35d66857a8ad8fcd017de2d36aa4a14153e25e77415ac0168ed85ed9fa689f397ba8dd332bd94218d7018109169c0dac59fe4d72dc0f34e50f1d59d5a57b3899dabde00806a862577fe8b4ded31ef8aa9bf67af6158fbc4bd19b7f068ed3d60e5364cf302a7db775f5798fed0e2e8e61535ae186e3a8805c16b7ebd7c32d2af01386f4bf2fe54a0112ea9b4e39cf9ce2bd0d72dacf3fe7d848005df5681172a1c92a4603625aad60d000c8620c7a647225544715bebccc5e263db245f3638aa43396b81ad73ad7153bd1c586e0041caccca34f772f12b17c4ec451f8b03487b0ce891634a9fb232b35a1428294d147638f", 0xfa) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r9, 0x8982, &(0x7f0000002800)={0x1, 'vlan0\x00', {}, 0x40}) 19:47:04 executing program 3: syz_emit_ethernet(0x4a, &(0x7f0000000800)={@link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0xe}, @random="8afc14827e90", @void, {@mpls_uc={0x8847, {[{0x8}, {0x59, 0x0, 0x1}, {0x9, 0x0, 0x1}, {0x20, 0x0, 0x1}], @generic="86066e8ae67def4c2ecf47864b2ebf948c35a94599d3c13b7f5a59e4fae14a475b1865661b6108a8b233c86e"}}}}, 0x0) [ 
2630.752201][T26480] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2630.949057][T26480] bond1224: entered promiscuous mode [ 2630.961901][ T27] audit: type=1804 audit(1690919224.989:1868): pid=26492 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.2" name="/root/syzkaller-testdir1875037404/syzkaller.08FvcD/5620/cgroup.controllers" dev="sda1" ino=1967 res=1 errno=0 [ 2630.991081][T26480] 8021q: adding VLAN 0 to HW filter on device bond1224 [ 2631.000795][T26481] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:05 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@val={0x0, 0x8917}, @void, @mpls={[{0x9}], @llc={@llc={0x80, 0xfe, "1d20", "d5a175717cd9a7d92a58c6a04499ea64ae52da618daa4f44458e5eb392294a0661f7a2a42c5b9f96bcd5e1ba11ddec17e87cae095d6144fdcc1021063e47296f789711a81e6d6d2496a160e90400f0d612880d04f46ae00bfa"}}}}, 0x65) [ 2631.109395][T26481] bond1185: entered promiscuous mode [ 2631.123843][T26481] 8021q: adding VLAN 0 to HW filter on device bond1185 [ 2631.142046][T26479] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:05 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@val={0x0, 0x8917}, @void, @mpls={[{0x9}], @llc={@llc={0x80, 0xfe, "1d20", "d5a175717cd9a7d92a58c6a04499ea64ae52da618daa4f44458e5eb392294a0661f7a2a42c5b9f96bcd5e1ba11ddec17e87cae095d6144fdcc1021063e47296f789711a81e6d6d2496a160e90400f0d612880d04f46ae00bfa"}}}}, 0x65) [ 2631.333041][T26479] bond1110: entered promiscuous mode 19:47:05 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@val={0x0, 0x8917}, @void, @mpls={[{0x9}], @llc={@llc={0x80, 0xfe, "1d20", "d5a175717cd9a7d92a58c6a04499ea64ae52da618daa4f44458e5eb392294a0661f7a2a42c5b9f96bcd5e1ba11ddec17e87cae095d6144fdcc1021063e47296f789711a81e6d6d2496a160e90400f0d612880d04f46ae00bfa"}}}}, 0x65) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) write$tun(0xffffffffffffffff, &(0x7f0000000000)={@val={0x0, 0x8917}, @void, @mpls={[{0x9}], @llc={@llc={0x80, 0xfe, "1d20", "d5a175717cd9a7d92a58c6a04499ea64ae52da618daa4f44458e5eb392294a0661f7a2a42c5b9f96bcd5e1ba11ddec17e87cae095d6144fdcc1021063e47296f789711a81e6d6d2496a160e90400f0d612880d04f46ae00bfa"}}}}, 0x65) (async) [ 2631.359017][T26479] 8021q: adding VLAN 0 to HW filter on device bond1110 19:47:05 executing program 3: getsockopt$inet_sctp_SCTP_LOCAL_AUTH_CHUNKS(0xffffffffffffffff, 0x84, 0x1b, &(0x7f0000000000)={0x0, 0x77, 
"291dd27f539825456ff969040773ed975ffada660dfb86d5fdad83985ce0887e916916d3fc4936729bd068d3b94743a372b8d416f7173a36b18bb529fdd07f64015c9821195df8d0252650d48dcc3ed0995eed6e878f7b7c9aec9135b42f86cec2569538fff8614fbffb958712e080f2c21081daedddaa"}, &(0x7f0000000080)=0x7f) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(0xffffffffffffffff, 0x84, 0x6f, &(0x7f0000000100)={r0, 0x1c, &(0x7f00000000c0)=[@in6={0xa, 0x4e20, 0x9, @remote, 0x6}]}, &(0x7f0000000140)=0x10) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) [ 2631.638762][ T27] audit: type=1804 audit(1690919225.669:1869): pid=26483 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir4109089803/syzkaller.bpdmKt/4187/cgroup.controllers" dev="sda1" ino=1966 res=1 errno=0 [ 2631.639034][T26487] bond1185: (slave bridge1088): making interface the new active one 19:47:05 executing program 3: getsockopt$inet_sctp_SCTP_LOCAL_AUTH_CHUNKS(0xffffffffffffffff, 0x84, 0x1b, &(0x7f0000000000)={0x0, 0x77, "291dd27f539825456ff969040773ed975ffada660dfb86d5fdad83985ce0887e916916d3fc4936729bd068d3b94743a372b8d416f7173a36b18bb529fdd07f64015c9821195df8d0252650d48dcc3ed0995eed6e878f7b7c9aec9135b42f86cec2569538fff8614fbffb958712e080f2c21081daedddaa"}, &(0x7f0000000080)=0x7f) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(0xffffffffffffffff, 0x84, 0x6f, &(0x7f0000000100)={r0, 0x1c, &(0x7f00000000c0)=[@in6={0xa, 0x4e20, 0x9, @remote, 0x6}]}, &(0x7f0000000140)=0x10) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:47:05 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000140)=ANY=[], 0x208e24b) bpf$OBJ_GET_MAP(0x7, &(0x7f0000000140)={&(0x7f00000000c0)='./file0\x00', 0x0, 0x8}, 0x10) (async) bpf$OBJ_GET_MAP(0x7, &(0x7f0000000140)={&(0x7f00000000c0)='./file0\x00', 0x0, 0x8}, 0x10) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000180)) (async) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000180)={0xffffffffffffffff}) mmap(&(0x7f0000ffd000/0x1000)=nil, 0x1000, 0x2000008, 0x11, r1, 0xc203e000) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r4, &(0x7f00000004c0)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0xce7b1dd20e6948d1}, 0xc, &(0x7f0000000480)={&(0x7f0000000500)=ANY=[@ANYBLOB="30000000190000012dbd7000fedbdf25021420093100ff0b0004000005001b00110000000c00168005000900000000005bccc6135be8f21a8cb71fe89298c143084a1f9f95914902d11d5e952bf49a9f105dc461eeb79144ca15854466c023f2635e3fe92528e54cb52571d67a72489802d699f38373f50b79f98504bc914cd0236ee3855de05d1efcfd1c748efa129da88362ac9ae914978ad28feb81d28dee59ed44e534b11ebf16671f2fd0b38612ada32751689e9f13570044c1c9a0a0873ab5715634da4f4887cc4846c139e4a11317ea38fe206b082d035225bcb0033a77db5d2f64fc6864dae1d327d11afc2f25"], 0x30}, 0x1, 0x0, 0x0, 0x20000000}, 0x200480c4) (async) sendmsg$nl_route(r4, &(0x7f00000004c0)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0xce7b1dd20e6948d1}, 0xc, 
&(0x7f0000000480)={&(0x7f0000000500)=ANY=[@ANYBLOB="30000000190000012dbd7000fedbdf25021420093100ff0b0004000005001b00110000000c00168005000900000000005bccc6135be8f21a8cb71fe89298c143084a1f9f95914902d11d5e952bf49a9f105dc461eeb79144ca15854466c023f2635e3fe92528e54cb52571d67a72489802d699f38373f50b79f98504bc914cd0236ee3855de05d1efcfd1c748efa129da88362ac9ae914978ad28feb81d28dee59ed44e534b11ebf16671f2fd0b38612ada32751689e9f13570044c1c9a0a0873ab5715634da4f4887cc4846c139e4a11317ea38fe206b082d035225bcb0033a77db5d2f64fc6864dae1d327d11afc2f25"], 0x30}, 0x1, 0x0, 0x0, 0x20000000}, 0x200480c4) r5 = syz_genetlink_get_family_id$ethtool(&(0x7f00000002c0), 0xffffffffffffffff) r6 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000240), r0) ioctl$ifreq_SIOCGIFINDEX_vcan(r3, 0x8933, &(0x7f0000000280)={'vxcan0\x00'}) (async) ioctl$ifreq_SIOCGIFINDEX_vcan(r3, 0x8933, &(0x7f0000000280)={'vxcan0\x00', 0x0}) sendmsg$ETHTOOL_MSG_TSINFO_GET(r0, &(0x7f00000003c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000380)={&(0x7f0000000300)={0x78, r6, 0x1, 0x70bd26, 0x25dfdbff, {}, [@HEADER={0x64, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x2}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x3}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r7}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'syzkaller0\x00'}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x3}, @ETHTOOL_A_HEADER_FLAGS={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'vcan0\x00'}, @ETHTOOL_A_HEADER_FLAGS={0x8}]}]}, 0x78}, 0x1, 0x0, 0x0, 0x20000800}, 0x4880) sendmsg$ETHTOOL_MSG_COALESCE_SET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000000)={0x1c, r5, 0x1, 0x70bd2c, 0x0, {}, [@ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8, 0x4, 0x8}]}, 0x1c}}, 0x0) sendfile(r3, r2, 0x0, 0xffffffff) (async) sendfile(r3, r2, 0x0, 0xffffffff) [ 2631.714673][T26487] bridge1088: entered promiscuous mode [ 2631.748790][T26487] bond1185: (slave bridge1088): Enslaving as an active interface with an up link 19:47:05 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x50030000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2631.932079][T26486] bond1224: (slave bridge1154): making interface the new active one [ 2631.952136][T26486] bridge1154: entered promiscuous mode [ 2631.980317][T26486] bond1224: (slave bridge1154): Enslaving as an active interface with an up link 19:47:06 executing program 3: getsockopt$inet_sctp_SCTP_LOCAL_AUTH_CHUNKS(0xffffffffffffffff, 0x84, 0x1b, &(0x7f0000000000)={0x0, 0x77, "291dd27f539825456ff969040773ed975ffada660dfb86d5fdad83985ce0887e916916d3fc4936729bd068d3b94743a372b8d416f7173a36b18bb529fdd07f64015c9821195df8d0252650d48dcc3ed0995eed6e878f7b7c9aec9135b42f86cec2569538fff8614fbffb958712e080f2c21081daedddaa"}, &(0x7f0000000080)=0x7f) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(0xffffffffffffffff, 0x84, 
0x6f, &(0x7f0000000100)={r0, 0x1c, &(0x7f00000000c0)=[@in6={0xa, 0x4e20, 0x9, @remote, 0x6}]}, &(0x7f0000000140)=0x10) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:47:06 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x648d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:06 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x54030000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2632.121919][T26488] bond1110: (slave bridge1040): making interface the new active one [ 2632.130659][T26488] bridge1040: entered promiscuous mode [ 2632.153982][T26488] bond1110: (slave bridge1040): Enslaving as an active interface with an up link [ 2632.249953][ T27] audit: type=1804 audit(1690919226.279:1870): pid=26518 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir4109089803/syzkaller.bpdmKt/4188/cgroup.controllers" dev="sda1" ino=1956 res=1 errno=0 19:47:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000001500)=ANY=[@ANYRES64=r0, @ANYRES32=r0], 0x208e24b) (async) write$binfmt_script(r0, &(0x7f0000001500)=ANY=[@ANYRES64=r0, @ANYRES32=r0], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x200000b, 0x28011, r0, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r2 = socket$alg(0x26, 0x5, 0x0) bind$alg(r2, &(0x7f00000000c0)={0x26, 'skcipher\x00', 0x0, 0x0, 'cbc(des3_ede)\x00'}, 0x58) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000000)="8db4c6d3916872c4d26e8e39f30e9ce9ab2f204389cf53c6", 0x18) (async) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, 
&(0x7f0000000000)="8db4c6d3916872c4d26e8e39f30e9ce9ab2f204389cf53c6", 0x18) r3 = accept$alg(r2, 0x0, 0x0) r4 = socket$nl_generic(0x10, 0x3, 0x10) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$EXT4_IOC_GROUP_ADD(0xffffffffffffffff, 0x40286608, &(0x7f00000014c0)={0xfffe0000, 0x4, 0x9e, 0x5, 0x3, 0x9}) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendfile(r3, r4, 0x0, 0x10000a006) (async) sendfile(r3, r4, 0x0, 0x10000a006) accept4$ax25(r1, &(0x7f0000000140)={{0x3, @bcast}, [@netrom, @bcast, @null, @bcast, @bcast, @rose, @null, @bcast]}, &(0x7f0000000040)=0x48, 0x0) r7 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000000000000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) write$binfmt_script(0xffffffffffffffff, &(0x7f00000015c0)={'#! ', './file0', [{0x20, 'vlan0\x00'}, {0x20, ')-\\!\\[\''}, {0x20, '\x00'}], 0xa, "5ac74974718eb47c9c547d7e45b9d93d2ba5231317a0f21eb4c14c47c1e19ee3512cc2e116ef6917d53aec77fcbeff9c76c0dd7357414ab759f0a993db9ce5920d560979cdd009"}, 0x63) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000380)='rcu_utilization\x00', r7}, 0x10) (async) r8 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000380)='rcu_utilization\x00', r7}, 0x10) sendfile(r0, r3, &(0x7f0000000280)=0x4, 0x5) (async) sendfile(r0, r3, &(0x7f0000000280)=0x4, 0x5) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(0xffffffffffffffff, 0x8982, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000001480)={&(0x7f00000002c0)='vnet_skip_tx_trigger\x00', r1}, 0x10) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000001480)={&(0x7f00000002c0)='vnet_skip_tx_trigger\x00', r1}, 0x10) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r8, 0x81f8943c, 0x0) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r8, 0x81f8943c, 0x0) ioctl$BTRFS_IOC_TREE_SEARCH(0xffffffffffffffff, 0xd0009411, &(0x7f0000000300)={{0x0, 0x200, 0x1000, 0x3, 0x1, 0x8, 0x0, 0x3, 0x0, 0x40000000, 0x100, 0x1, 0x3, 0xcc, 0x4cd}}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r7, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6
380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) r10 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r10, &(0x7f0000001400)={0x0, 0x0, &(0x7f0000001440)={0x0}, 0x1, 0x0, 0x0, 0x2400c800}, 0x4004050) getsockname$packet(0xffffffffffffffff, &(0x7f0000001640)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000001540)=0x14) (async) getsockname$packet(0xffffffffffffffff, &(0x7f0000001640)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000001540)=0x14) setsockopt$ALG_SET_KEY(r10, 0x117, 0x1, &(0x7f0000001300)="e0d85939552deb5ddd35d66857a8ad8fcd017de2d36aa4a14153e25e77415ac0168ed85ed9fa689f397ba8dd332bd94218d7018109169c0dac59fe4d72dc0f34e50f1d59d5a57b3899dabde00806a862577fe8b4ded31ef8aa9bf67af6158fbc4bd19b7f068ed3d60e5364cf302a7db775f5798fed0e2e8e61535ae186e3a8805c16b7ebd7c32d2af01386f4bf2fe54a0112ea9b4e39cf9ce2bd0d72dacf3fe7d848005df5681172a1c92a4603625aad60d000c8620c7a647225544715bebccc5e263db245f3638aa43396b81ad73ad7153bd1c586e0041caccca34f772f12b17c4ec451f8b03487b0ce891634a9fb232b35a1428294d147638f", 0xfa) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r9, 0x8982, &(0x7f0000002800)={0x1, 'vlan0\x00', {}, 0x40}) (async) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r9, 0x8982, &(0x7f0000002800)={0x1, 'vlan0\x00', {}, 0x40}) [ 2632.293673][T26514] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:47:06 executing program 3: syz_emit_ethernet(0x7, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x68, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64810102}, {{}, {}, {}, {0x8, 0x88be, 0x0, {{}, 0x1, {0xfffffffe}}}}}}}}, 0x0) syz_emit_ethernet(0x5e, &(0x7f0000000000)={@link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0x3}, @random="8d0eecb141e3", @val={@val={0x9100, 0x1, 0x1, 0x3}, {0x8100, 0x2, 0x0, 0x4}}, {@canfd={0xd, {{0x3, 0x0, 0x0, 0x1}, 0x28, 0x3, 0x0, 0x0, "91b361ed68d515c3d4db079274a8f9ff17c61f212f6bbc23dfe8db6aff52b0a834e1b038c20c2925579fd964c455af36c56ae0ea013f01bf77dfbb0283097677"}}}}, &(0x7f0000000080)={0x0, 0x3, [0x855, 0x5da, 0x4ba, 0x5ef]}) 19:47:06 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000140)=ANY=[], 0x208e24b) bpf$OBJ_GET_MAP(0x7, &(0x7f0000000140)={&(0x7f00000000c0)='./file0\x00', 0x0, 0x8}, 0x10) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000180)={0xffffffffffffffff}) mmap(&(0x7f0000ffd000/0x1000)=nil, 0x1000, 0x2000008, 0x11, r1, 0xc203e000) (async) mmap(&(0x7f0000ffd000/0x1000)=nil, 0x1000, 0x2000008, 0x11, r1, 0xc203e000) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r4, &(0x7f00000004c0)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0xce7b1dd20e6948d1}, 0xc, &(0x7f0000000480)={&(0x7f0000000500)=ANY=[@ANYBLOB="30000000190000012dbd7000fedbdf25021420093100ff0b0004000005001b00110000000c00168005000900000000005bccc6135be8f21a8cb71fe89298c143084a1f9f95914902d11d5e952bf49a9f105dc461eeb79144ca15854466c023f2635e3fe92528e54cb52571d67a72489802d699f38373f50b79f98504bc914cd0236ee3855de05d1efcfd1c748efa129da88362ac9ae914978ad28feb81d28dee59ed44e534b11ebf16671f2fd0b38612ada32751689e9f13570044c1c9a0a0873ab5715634da4f4887cc4846c139e4a11317ea38fe206b082d035225bcb0033a77db5d2f64fc6864dae1d327d11afc2f25"], 0x30}, 0x1, 0x0, 0x0, 0x20000000}, 0x200480c4) r5 = syz_genetlink_get_family_id$ethtool(&(0x7f00000002c0), 0xffffffffffffffff) r6 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000240), r0) ioctl$ifreq_SIOCGIFINDEX_vcan(r3, 0x8933, &(0x7f0000000280)={'vxcan0\x00', 0x0}) sendmsg$ETHTOOL_MSG_TSINFO_GET(r0, &(0x7f00000003c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000380)={&(0x7f0000000300)={0x78, r6, 0x1, 0x70bd26, 0x25dfdbff, {}, [@HEADER={0x64, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x2}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x3}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r7}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'syzkaller0\x00'}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x3}, @ETHTOOL_A_HEADER_FLAGS={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'vcan0\x00'}, @ETHTOOL_A_HEADER_FLAGS={0x8}]}]}, 0x78}, 0x1, 0x0, 0x0, 0x20000800}, 0x4880) sendmsg$ETHTOOL_MSG_COALESCE_SET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000000)={0x1c, r5, 0x1, 0x70bd2c, 0x0, {}, [@ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8, 0x4, 0x8}]}, 0x1c}}, 0x0) (async) sendmsg$ETHTOOL_MSG_COALESCE_SET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000000)={0x1c, r5, 0x1, 0x70bd2c, 0x0, {}, 
[@ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8, 0x4, 0x8}]}, 0x1c}}, 0x0) sendfile(r3, r2, 0x0, 0xffffffff) (async) sendfile(r3, r2, 0x0, 0xffffffff) [ 2632.449780][T26514] bond1186: entered promiscuous mode [ 2632.457110][T26514] 8021q: adding VLAN 0 to HW filter on device bond1186 19:47:06 executing program 3: syz_emit_ethernet(0x7, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x68, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64810102}, {{}, {}, {}, {0x8, 0x88be, 0x0, {{}, 0x1, {0xfffffffe}}}}}}}}, 0x0) (async) syz_emit_ethernet(0x5e, &(0x7f0000000000)={@link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0x3}, @random="8d0eecb141e3", @val={@val={0x9100, 0x1, 0x1, 0x3}, {0x8100, 0x2, 0x0, 0x4}}, {@canfd={0xd, {{0x3, 0x0, 0x0, 0x1}, 0x28, 0x3, 0x0, 0x0, "91b361ed68d515c3d4db079274a8f9ff17c61f212f6bbc23dfe8db6aff52b0a834e1b038c20c2925579fd964c455af36c56ae0ea013f01bf77dfbb0283097677"}}}}, &(0x7f0000000080)={0x0, 0x3, [0x855, 0x5da, 0x4ba, 0x5ef]}) [ 2632.551227][T26515] bond1186: (slave bridge1089): making interface the new active one [ 2632.557933][ T27] audit: type=1804 audit(1690919226.579:1871): pid=26539 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.2" name="/root/syzkaller-testdir1875037404/syzkaller.08FvcD/5621/cgroup.controllers" dev="sda1" ino=1967 res=1 errno=0 [ 2632.560239][T26515] bridge1089: entered promiscuous mode 19:47:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000001500)=ANY=[@ANYRES64=r0, @ANYRES32=r0], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x200000b, 0x28011, r0, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r2 = socket$alg(0x26, 0x5, 0x0) bind$alg(r2, &(0x7f00000000c0)={0x26, 'skcipher\x00', 0x0, 0x0, 'cbc(des3_ede)\x00'}, 0x58) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000000)="8db4c6d3916872c4d26e8e39f30e9ce9ab2f204389cf53c6", 0x18) r3 = accept$alg(r2, 0x0, 0x0) (async) r4 = socket$nl_generic(0x10, 0x3, 0x10) (async) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$EXT4_IOC_GROUP_ADD(0xffffffffffffffff, 0x40286608, &(0x7f00000014c0)={0xfffe0000, 0x4, 0x9e, 0x5, 0x3, 0x9}) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) (async, rerun: 64) sendfile(r3, r4, 0x0, 0x10000a006) (async, rerun: 64) accept4$ax25(r1, &(0x7f0000000140)={{0x3, @bcast}, [@netrom, @bcast, @null, @bcast, @bcast, @rose, @null, @bcast]}, &(0x7f0000000040)=0x48, 0x0) (async) r7 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000000000000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) write$binfmt_script(0xffffffffffffffff, &(0x7f00000015c0)={'#! 
', './file0', [{0x20, 'vlan0\x00'}, {0x20, ')-\\!\\[\''}, {0x20, '\x00'}], 0xa, "5ac74974718eb47c9c547d7e45b9d93d2ba5231317a0f21eb4c14c47c1e19ee3512cc2e116ef6917d53aec77fcbeff9c76c0dd7357414ab759f0a993db9ce5920d560979cdd009"}, 0x63) (async, rerun: 32) r8 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000380)='rcu_utilization\x00', r7}, 0x10) (async, rerun: 32) sendfile(r0, r3, &(0x7f0000000280)=0x4, 0x5) (async, rerun: 64) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(0xffffffffffffffff, 0x8982, 0x0) (rerun: 64) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000001480)={&(0x7f00000002c0)='vnet_skip_tx_trigger\x00', r1}, 0x10) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r8, 0x81f8943c, 0x0) ioctl$BTRFS_IOC_TREE_SEARCH(0xffffffffffffffff, 0xd0009411, &(0x7f0000000300)={{0x0, 0x200, 0x1000, 0x3, 0x1, 0x8, 0x0, 0x3, 0x0, 0x40000000, 0x100, 0x1, 0x3, 0xcc, 0x4cd}}) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r7, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9d
ea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3
b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) (async) r10 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r10, &(0x7f0000001400)={0x0, 0x0, &(0x7f0000001440)={0x0}, 0x1, 0x0, 0x0, 0x2400c800}, 0x4004050) (async) getsockname$packet(0xffffffffffffffff, &(0x7f0000001640)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000001540)=0x14) (async) setsockopt$ALG_SET_KEY(r10, 0x117, 0x1, &(0x7f0000001300)="e0d85939552deb5ddd35d66857a8ad8fcd017de2d36aa4a14153e25e77415ac0168ed85ed9fa689f397ba8dd332bd94218d7018109169c0dac59fe4d72dc0f34e50f1d59d5a57b3899dabde00806a862577fe8b4ded31ef8aa9bf67af6158fbc4bd19b7f068ed3d60e5364cf302a7db775f5798fed0e2e8e61535ae186e3a8805c16b7ebd7c32d2af01386f4bf2fe54a0112ea9b4e39cf9ce2bd0d72dacf3fe7d848005df5681172a1c92a4603625aad60d000c8620c7a647225544715bebccc5e263db245f3638aa43396b81ad73ad7153bd1c586e0041caccca34f772f12b17c4ec451f8b03487b0ce891634a9fb232b35a1428294d147638f", 0xfa) (async) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r9, 0x8982, &(0x7f0000002800)={0x1, 'vlan0\x00', {}, 0x40}) [ 2632.608236][T26515] bond1186: (slave bridge1089): Enslaving as an active interface with an up link [ 2632.644720][T26524] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:06 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x56030000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2632.716779][ T27] audit: type=1804 audit(1690919226.749:1872): pid=26541 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir4109089803/syzkaller.bpdmKt/4189/cgroup.controllers" dev="sda1" ino=1951 res=1 errno=0 19:47:06 executing program 3: syz_emit_ethernet(0x7, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x68, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64810102}, {{}, {}, {}, {0x8, 0x88be, 0x0, {{}, 0x1, {0xfffffffe}}}}}}}}, 0x0) (async) syz_emit_ethernet(0x7, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x68, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64810102}, {{}, {}, {}, {0x8, 0x88be, 0x0, {{}, 0x1, {0xfffffffe}}}}}}}}, 0x0) syz_emit_ethernet(0x5e, &(0x7f0000000000)={@link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0x3}, @random="8d0eecb141e3", @val={@val={0x9100, 0x1, 0x1, 0x3}, {0x8100, 0x2, 0x0, 0x4}}, {@canfd={0xd, {{0x3, 0x0, 0x0, 0x1}, 0x28, 0x3, 0x0, 0x0, "91b361ed68d515c3d4db079274a8f9ff17c61f212f6bbc23dfe8db6aff52b0a834e1b038c20c2925579fd964c455af36c56ae0ea013f01bf77dfbb0283097677"}}}}, &(0x7f0000000080)={0x0, 0x3, [0x855, 0x5da, 0x4ba, 0x5ef]}) 19:47:06 executing program 0: socketpair$unix(0x1, 0x0, 0x0, 0x0) connect$unix(0xffffffffffffffff, &(0x7f000057eff8)=@file={0x0, './file0\x00'}, 0x6e) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0xf03affff) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) r0 = socket$inet6(0xa, 0x800000000000002, 0x0) connect$inet6(r0, &(0x7f0000000080)={0xa, 0x0, 0x0, @private0}, 0x1c) r1 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000080)={&(0x7f0000000040)='sched_switch\x00'}, 0x10) r2 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000001c0)='sched_process_wait\x00'}, 0x10) r3 = bpf$ITER_CREATE(0xb, &(0x7f0000000100)={r2}, 0x8) write$cgroup_int(r3, &(0x7f00000001c0), 0xfffffdef) r4 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPSET_CMD_CREATE(r4, &(0x7f0000001080)={0x0, 0x0, &(0x7f0000000080)={&(0x7f0000000000)={0x60, 0x2, 0x6, 0x1, 0x0, 0x0, {}, [@IPSET_ATTR_TYPENAME={0xe, 0x3, 'bitmap:ip\x00'}, @IPSET_ATTR_REVISION={0x5}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz0\x00'}, @IPSET_ATTR_DATA={0x18, 0x7, 0x0, 0x1, [@IPSET_ATTR_IP={0xc, 0x1, 0x0, 0x1, @IPSET_ATTR_IPADDR_IPV4={0x8, 0x1, 0x1, 0x0, @empty=0x80ffffaf}}, @IPSET_ATTR_CADT_FLAGS={0x8, 0x6, 0x0}]}, @IPSET_ATTR_FAMILY={0x5, 0x5, 0x2}, @IPSET_ATTR_PROTOCOL={0x5, 0x1, 0x6}]}, 0x60}}, 0x0) sendmmsg$inet6(r0, &(0x7f0000000f40)=[{{0x0, 0xcb000000, 0x0}}], 0x28000, 0x80fe) r5 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r6 = socket$nl_sock_diag(0x10, 0x3, 0x4) sendfile(r6, 0xffffffffffffffff, 0x0, 0x21fd1ee9) sendfile(r1, r6, &(0x7f0000000380)=0x6, 0x0) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) sendfile(r7, r5, 0x0, 0xf03affff) sendfile(r7, r5, 0x0, 0x8000000000004) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_int(r9, 0x0, 0x0) sendfile(r9, r8, 0x0, 0xf03affff) r10 = bpf$ITER_CREATE(0x21, &(0x7f00000000c0)={r7}, 0x8) write$binfmt_script(r10, &(0x7f00000003c0)=ANY=[@ANYBLOB="2321202e2f66696c65302f66696c653020267d5d405e2c3a40278e2f200020275c2e2e2e275b402f207929206d656d6f72792e6576656e7473002073797a30000a5dc783382d8c5ac86079b3b2cc148946339f58aaf6f30056bf499c9bae68f43044d0d652c3dba3bf9a58ce777416c0613914ea40a56676458a934807c304018891a8ee7169e6e507267a6f08de3424dff68bcaf9260822e80e2355fe35c531c641aa51a6e8cf3db361ac18aba7c3a91075bad721ded562c94a9dedee12776524cc2d79e1c831667b87b73489dd9d20ccd6310860bc47bbbf4e067d4e9c4aebd77cff5a3d46ce5a1406cad56127b72ef3409660a8e0d4218c16d6beb17efa63ffd88d3f8b48ba49f7de1c5971aaf3d48cf12817312907768ac063212a2651c19025"], 0xef) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x4010, r9, 0x5e5ba000) [ 2632.814481][ T27] audit: type=1804 audit(1690919226.749:1873): pid=26543 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir4109089803/syzkaller.bpdmKt/4189/cgroup.controllers" dev="sda1" ino=1951 res=1 errno=0 [ 2632.865029][T26524] bond1225: entered promiscuous mode [ 2632.874133][T26524] 8021q: adding VLAN 0 to HW filter on device bond1225 19:47:07 executing program 3: setsockopt$IPT_SO_SET_REPLACE(0xffffffffffffffff, 0x0, 0x40, &(0x7f0000000080)=@nat={'nat\x00', 0x1b, 0x5, 0x628, 0x0, 0xb8, 0xffffffff, 0x0, 0xb8, 0x590, 0x590, 0xffffffff, 0x590, 0x590, 0x5, &(0x7f0000000000), {[{{@ip={@multicast1, @empty, 0xffffffff, 0xffffff00, 'vcan0\x00', 'bond0\x00', {}, {0xff}, 0x11, 0x1, 0x8}, 0x0, 0x70, 0xb8}, @unspec=@DNAT1={0x48, 'DNAT\x00', 0x1, {0x4, @ipv4=@local, @ipv4=@multicast1, @port=0x4e20, @icmp_id=0x68}}}, {{@ip={@loopback, @rand_addr=0x64010102, 0x0, 0xffffff00, 'bridge0\x00', 'gretap0\x00', {}, {}, 0x2e, 0x1}, 0x0, 0x1b8, 0x1f0, 0x0, {}, [@common=@icmp={{0x28}, {0x3, "b201"}}, @common=@unspec=@comment={{0x120}}]}, @REDIRECT={0x38, 'REDIRECT\x00', 0x0, {0x1, {0x7, @rand_addr=0x64010102, @broadcast, @gre_key=0x8, @icmp_id=0x64}}}}, {{@ip={@local, @local, 0xff000000, 0xffffff00, 'veth1_vlan\x00', 'nicvf0\x00', {}, {}, 0x4, 0x3}, 0x0, 0x1d8, 0x210, 0x0, {}, [@common=@inet=@sctp={{0x148}, {[0x4e23, 0x4e24], [0x4e24, 0x4e24], [0x7, 0x4, 0x0, 0x2, 0x1, 0x2, 0x0, 0x0, 0x2, 0x1, 0x2, 0x9, 0x9, 0x80000001, 0x1000000, 0xafc, 0x6, 0x947b4a8, 0x5, 0xfff, 0x3360, 0x401, 0x3, 0xae5c, 0x2, 0x5, 0x1f, 0x1, 0x8, 0x6, 0xfff, 0x8000, 0x9, 0x1ef9, 0x801a, 0x9, 0x8001, 0x4, 0x8001, 0x4, 0xc4, 0x3, 0x400, 0x1ff6, 0x81, 0x6, 0x7000, 0x6, 0x2, 0x8000, 0x1, 0x7fff, 0x7, 0x80, 0x3ff, 0x10001, 0xfffffff8, 0x556e, 0x3, 0x4, 0x3, 0x0, 0x3, 0x9], 0x1, [{0xb0, 0x1, 0x3}, {0xa9, 0x0, 0x4}, {0x0, 0x0, 0x20}, {0x1f, 0x6, 0x98}], 0x3, 0x3, 0x2}}, @common=@socket0={{0x20}}]}, @NETMAP={0x38, 'NETMAP\x00', 0x0, {0x1, {0x18, @multicast2, @multicast1, @gre_key=0x8, @gre_key=0x3}}}}, 
{{@ip={@rand_addr=0x64010102, @private=0xa010100, 0xff, 0xffffff00, 'team_slave_0\x00', 'nicvf0\x00', {0xff}, {}, 0x6, 0x1, 0x12}, 0x0, 0xa0, 0xd8, 0x0, {}, [@common=@addrtype={{0x30}, {0x800, 0x2, 0x1}}]}, @MASQUERADE={0x38, 'MASQUERADE\x00', 0x0, {0x1, {0xf, @empty, @remote, @port=0x4e20, @icmp_id=0x5}}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x688) 19:47:07 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='memory.events\x00', 0x26e1, 0x0) sendfile(r0, r0, &(0x7f00000000c0)=0x9, 0x8000000000000000) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='memory.events\x00', 0x7a05, 0x1700) ioctl$FS_IOC_SETFLAGS(r1, 0x40086602, &(0x7f0000000000)) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r3 = bpf$ITER_CREATE(0x21, &(0x7f0000000080), 0x8) openat$cgroup_ro(r2, &(0x7f0000000100)='blkio.bfq.time_recursive\x00', 0x0, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='cgroup.kill\x00', 0x0, 0x0) write$cgroup_int(r2, &(0x7f0000000200), 0x806000) r4 = bpf$PROG_LOAD(0x5, &(0x7f00000013c0)={0x12, 0x10, &(0x7f0000001440)=@framed={{0x18, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x2}, [@alu={0x4, 0x0, 0xd, 0x8, 0x1, 0x18, 0x1}, @btf_id={0x18, 0xc, 0x3, 0x0, 0x2}, @jmp={0x5, 0x1, 0x1, 0x0, 0xa, 0xffffffffffffffaf, 0xfffffffffffffff0}, @call={0x85, 0x0, 0x0, 0x55}, @call={0x85, 0x0, 0x0, 0x8c}, @btf_id={0x18, 0x2, 0x3, 0x0, 0x1}, @cb_func={0x18, 0xb, 0x4, 0x0, 0xffffffffffffffff}, @cb_func={0x18, 0x2, 0x4, 0x0, 0x7}, @kfunc={0x85, 0x0, 0x2, 0x0, 0x5}]}, &(0x7f00000001c0)='GPL\x00', 0x2, 0x1000, &(0x7f00000003c0)=""/4096, 0x40f00, 0x16, '\x00', 0x0, 0x23, r0, 0x8, &(0x7f0000000240)={0x7, 0x3}, 0x8, 0x10, &(0x7f0000000280)={0x4, 0x10, 0x4, 0x7}, 0x10, 0xffffffffffffffff, r3, 0x0, &(0x7f0000000300)=[r2, 0xffffffffffffffff]}, 0x80) ioctl$EXT4_IOC_CLEAR_ES_CACHE(r4, 0x6628) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000380)={0x0, 0x0, 0xbe0f, 0x9ffffc}) [ 2633.120900][T26527] bond1225: (slave bridge1155): making interface the new active one [ 2633.131397][T26527] bridge1155: entered promiscuous mode [ 2633.179133][T26527] bond1225: (slave bridge1155): Enslaving as an active interface with an up link [ 2633.208435][T26529] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:47:07 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x56120000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:07 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x65580000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:07 executing program 3: setsockopt$IPT_SO_SET_REPLACE(0xffffffffffffffff, 0x0, 0x40, &(0x7f0000000080)=@nat={'nat\x00', 0x1b, 0x5, 0x628, 0x0, 0xb8, 0xffffffff, 0x0, 0xb8, 0x590, 0x590, 0xffffffff, 0x590, 0x590, 0x5, &(0x7f0000000000), {[{{@ip={@multicast1, @empty, 0xffffffff, 0xffffff00, 'vcan0\x00', 'bond0\x00', {}, {0xff}, 0x11, 0x1, 0x8}, 0x0, 0x70, 0xb8}, @unspec=@DNAT1={0x48, 'DNAT\x00', 0x1, {0x4, @ipv4=@local, @ipv4=@multicast1, @port=0x4e20, @icmp_id=0x68}}}, {{@ip={@loopback, @rand_addr=0x64010102, 0x0, 0xffffff00, 'bridge0\x00', 'gretap0\x00', {}, {}, 0x2e, 0x1}, 0x0, 0x1b8, 0x1f0, 0x0, {}, [@common=@icmp={{0x28}, {0x3, "b201"}}, @common=@unspec=@comment={{0x120}}]}, @REDIRECT={0x38, 'REDIRECT\x00', 0x0, {0x1, {0x7, @rand_addr=0x64010102, @broadcast, @gre_key=0x8, @icmp_id=0x64}}}}, {{@ip={@local, @local, 0xff000000, 0xffffff00, 'veth1_vlan\x00', 'nicvf0\x00', {}, {}, 0x4, 0x3}, 0x0, 0x1d8, 0x210, 0x0, {}, [@common=@inet=@sctp={{0x148}, {[0x4e23, 0x4e24], [0x4e24, 0x4e24], [0x7, 0x4, 0x0, 0x2, 0x1, 0x2, 0x0, 0x0, 0x2, 0x1, 0x2, 0x9, 0x9, 0x80000001, 0x1000000, 0xafc, 0x6, 0x947b4a8, 0x5, 0xfff, 0x3360, 0x401, 0x3, 0xae5c, 0x2, 0x5, 0x1f, 0x1, 0x8, 0x6, 0xfff, 0x8000, 0x9, 0x1ef9, 0x801a, 0x9, 0x8001, 0x4, 0x8001, 0x4, 0xc4, 0x3, 0x400, 0x1ff6, 0x81, 0x6, 0x7000, 0x6, 0x2, 0x8000, 0x1, 0x7fff, 0x7, 0x80, 0x3ff, 0x10001, 0xfffffff8, 0x556e, 0x3, 0x4, 0x3, 0x0, 0x3, 0x9], 0x1, [{0xb0, 0x1, 0x3}, {0xa9, 0x0, 0x4}, {0x0, 0x0, 0x20}, {0x1f, 0x6, 0x98}], 0x3, 0x3, 0x2}}, @common=@socket0={{0x20}}]}, @NETMAP={0x38, 'NETMAP\x00', 0x0, {0x1, {0x18, @multicast2, @multicast1, @gre_key=0x8, @gre_key=0x3}}}}, {{@ip={@rand_addr=0x64010102, @private=0xa010100, 0xff, 0xffffff00, 'team_slave_0\x00', 'nicvf0\x00', {0xff}, {}, 0x6, 0x1, 0x12}, 0x0, 0xa0, 0xd8, 0x0, {}, [@common=@addrtype={{0x30}, {0x800, 0x2, 0x1}}]}, 
@MASQUERADE={0x38, 'MASQUERADE\x00', 0x0, {0x1, {0xf, @empty, @remote, @port=0x4e20, @icmp_id=0x5}}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x688) 19:47:07 executing program 0: socketpair$unix(0x1, 0x0, 0x0, 0x0) (async) connect$unix(0xffffffffffffffff, &(0x7f000057eff8)=@file={0x0, './file0\x00'}, 0x6e) (async) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0xf03affff) (async) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) (async) r0 = socket$inet6(0xa, 0x800000000000002, 0x0) connect$inet6(r0, &(0x7f0000000080)={0xa, 0x0, 0x0, @private0}, 0x1c) (async) r1 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000080)={&(0x7f0000000040)='sched_switch\x00'}, 0x10) (async) r2 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000001c0)='sched_process_wait\x00'}, 0x10) r3 = bpf$ITER_CREATE(0xb, &(0x7f0000000100)={r2}, 0x8) write$cgroup_int(r3, &(0x7f00000001c0), 0xfffffdef) (async) r4 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPSET_CMD_CREATE(r4, &(0x7f0000001080)={0x0, 0x0, &(0x7f0000000080)={&(0x7f0000000000)={0x60, 0x2, 0x6, 0x1, 0x0, 0x0, {}, [@IPSET_ATTR_TYPENAME={0xe, 0x3, 'bitmap:ip\x00'}, @IPSET_ATTR_REVISION={0x5}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz0\x00'}, @IPSET_ATTR_DATA={0x18, 0x7, 0x0, 0x1, [@IPSET_ATTR_IP={0xc, 0x1, 0x0, 0x1, @IPSET_ATTR_IPADDR_IPV4={0x8, 0x1, 0x1, 0x0, @empty=0x80ffffaf}}, @IPSET_ATTR_CADT_FLAGS={0x8, 0x6, 0x0}]}, @IPSET_ATTR_FAMILY={0x5, 0x5, 0x2}, @IPSET_ATTR_PROTOCOL={0x5, 0x1, 0x6}]}, 0x60}}, 0x0) (async) sendmmsg$inet6(r0, &(0x7f0000000f40)=[{{0x0, 0xcb000000, 0x0}}], 0x28000, 0x80fe) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r6 = socket$nl_sock_diag(0x10, 0x3, 0x4) sendfile(r6, 0xffffffffffffffff, 0x0, 0x21fd1ee9) (async) sendfile(r1, r6, &(0x7f0000000380)=0x6, 0x0) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) (async) sendfile(r7, r5, 0x0, 0xf03affff) (async) sendfile(r7, r5, 0x0, 0x8000000000004) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_int(r9, 0x0, 0x0) sendfile(r9, r8, 0x0, 0xf03affff) (async) r10 = bpf$ITER_CREATE(0x21, &(0x7f00000000c0)={r7}, 0x8) write$binfmt_script(r10, &(0x7f00000003c0)=ANY=[@ANYBLOB="2321202e2f66696c65302f66696c653020267d5d405e2c3a40278e2f200020275c2e2e2e275b402f207929206d656d6f72792e6576656e7473002073797a30000a5dc783382d8c5ac86079b3b2cc148946339f58aaf6f30056bf499c9bae68f43044d0d652c3dba3bf9a58ce777416c0613914ea40a56676458a934807c304018891a8ee7169e6e507267a6f08de3424dff68bcaf9260822e80e2355fe35c531c641aa51a6e8cf3db361ac18aba7c3a91075bad721ded562c94a9dedee12776524cc2d79e1c831667b87b73489dd9d20ccd6310860bc47bbbf4e067d4e9c4aebd77cff5a3d46ce5a1406cad56127b72ef3409660a8e0d4218c16d6beb17efa63ffd88d3f8b48ba49f7de1c5971aaf3d48cf12817312907768ac063212a2651c19025"], 0xef) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x4010, r9, 0x5e5ba000) [ 2633.263272][T26529] workqueue: Failed to create a rescuer kthread for wq "bond1111": -EINTR [ 2633.462572][T26552] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:47:07 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='memory.events\x00', 0x26e1, 0x0) sendfile(r0, r0, &(0x7f00000000c0)=0x9, 0x8000000000000000) (async, rerun: 64) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='memory.events\x00', 0x7a05, 0x1700) (rerun: 64) ioctl$FS_IOC_SETFLAGS(r1, 0x40086602, &(0x7f0000000000)) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async, rerun: 32) r3 = bpf$ITER_CREATE(0x21, &(0x7f0000000080), 0x8) (rerun: 32) openat$cgroup_ro(r2, &(0x7f0000000100)='blkio.bfq.time_recursive\x00', 0x0, 0x0) (async) openat$cgroup_ro(r0, &(0x7f0000000040)='cgroup.kill\x00', 0x0, 0x0) write$cgroup_int(r2, &(0x7f0000000200), 0x806000) (async) r4 = bpf$PROG_LOAD(0x5, &(0x7f00000013c0)={0x12, 0x10, &(0x7f0000001440)=@framed={{0x18, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x2}, [@alu={0x4, 0x0, 0xd, 0x8, 0x1, 0x18, 0x1}, @btf_id={0x18, 0xc, 0x3, 0x0, 0x2}, @jmp={0x5, 0x1, 0x1, 0x0, 0xa, 0xffffffffffffffaf, 0xfffffffffffffff0}, @call={0x85, 0x0, 0x0, 0x55}, @call={0x85, 0x0, 0x0, 0x8c}, @btf_id={0x18, 0x2, 0x3, 0x0, 0x1}, @cb_func={0x18, 0xb, 0x4, 0x0, 0xffffffffffffffff}, @cb_func={0x18, 0x2, 0x4, 0x0, 0x7}, @kfunc={0x85, 0x0, 0x2, 0x0, 0x5}]}, &(0x7f00000001c0)='GPL\x00', 0x2, 0x1000, &(0x7f00000003c0)=""/4096, 0x40f00, 0x16, '\x00', 0x0, 0x23, r0, 0x8, &(0x7f0000000240)={0x7, 0x3}, 0x8, 0x10, &(0x7f0000000280)={0x4, 0x10, 0x4, 0x7}, 0x10, 0xffffffffffffffff, r3, 0x0, &(0x7f0000000300)=[r2, 0xffffffffffffffff]}, 0x80) ioctl$EXT4_IOC_CLEAR_ES_CACHE(r4, 0x6628) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000380)={0x0, 0x0, 0xbe0f, 0x9ffffc}) 19:47:07 executing program 0: socketpair$unix(0x1, 0x0, 0x0, 0x0) connect$unix(0xffffffffffffffff, &(0x7f000057eff8)=@file={0x0, './file0\x00'}, 0x6e) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0xf03affff) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) socket$inet6(0xa, 0x800000000000002, 0x0) (async) r0 = socket$inet6(0xa, 0x800000000000002, 0x0) connect$inet6(r0, &(0x7f0000000080)={0xa, 0x0, 0x0, @private0}, 0x1c) r1 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000080)={&(0x7f0000000040)='sched_switch\x00'}, 0x10) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000001c0)='sched_process_wait\x00'}, 0x10) (async) r2 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000001c0)='sched_process_wait\x00'}, 0x10) bpf$ITER_CREATE(0xb, &(0x7f0000000100)={r2}, 0x8) (async) r3 = bpf$ITER_CREATE(0xb, &(0x7f0000000100)={r2}, 0x8) write$cgroup_int(r3, &(0x7f00000001c0), 0xfffffdef) r4 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPSET_CMD_CREATE(r4, &(0x7f0000001080)={0x0, 0x0, &(0x7f0000000080)={&(0x7f0000000000)={0x60, 0x2, 0x6, 0x1, 0x0, 0x0, {}, [@IPSET_ATTR_TYPENAME={0xe, 0x3, 'bitmap:ip\x00'}, @IPSET_ATTR_REVISION={0x5}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz0\x00'}, @IPSET_ATTR_DATA={0x18, 0x7, 0x0, 0x1, [@IPSET_ATTR_IP={0xc, 0x1, 0x0, 0x1, @IPSET_ATTR_IPADDR_IPV4={0x8, 0x1, 0x1, 0x0, @empty=0x80ffffaf}}, @IPSET_ATTR_CADT_FLAGS={0x8, 0x6, 0x0}]}, @IPSET_ATTR_FAMILY={0x5, 0x5, 0x2}, @IPSET_ATTR_PROTOCOL={0x5, 0x1, 0x6}]}, 0x60}}, 0x0) sendmmsg$inet6(r0, &(0x7f0000000f40)=[{{0x0, 0xcb000000, 0x0}}], 0x28000, 0x80fe) (async) sendmmsg$inet6(r0, &(0x7f0000000f40)=[{{0x0, 0xcb000000, 0x0}}], 0x28000, 0x80fe) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r6 = 
socket$nl_sock_diag(0x10, 0x3, 0x4) sendfile(r6, 0xffffffffffffffff, 0x0, 0x21fd1ee9) (async) sendfile(r6, 0xffffffffffffffff, 0x0, 0x21fd1ee9) sendfile(r1, r6, &(0x7f0000000380)=0x6, 0x0) (async) sendfile(r1, r6, &(0x7f0000000380)=0x6, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) sendfile(r7, r5, 0x0, 0xf03affff) sendfile(r7, r5, 0x0, 0x8000000000004) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_int(r9, 0x0, 0x0) sendfile(r9, r8, 0x0, 0xf03affff) r10 = bpf$ITER_CREATE(0x21, &(0x7f00000000c0)={r7}, 0x8) write$binfmt_script(r10, &(0x7f00000003c0)=ANY=[@ANYBLOB="2321202e2f66696c65302f66696c653020267d5d405e2c3a40278e2f200020275c2e2e2e275b402f207929206d656d6f72792e6576656e7473002073797a30000a5dc783382d8c5ac86079b3b2cc148946339f58aaf6f30056bf499c9bae68f43044d0d652c3dba3bf9a58ce777416c0613914ea40a56676458a934807c304018891a8ee7169e6e507267a6f08de3424dff68bcaf9260822e80e2355fe35c531c641aa51a6e8cf3db361ac18aba7c3a91075bad721ded562c94a9dedee12776524cc2d79e1c831667b87b73489dd9d20ccd6310860bc47bbbf4e067d4e9c4aebd77cff5a3d46ce5a1406cad56127b72ef3409660a8e0d4218c16d6beb17efa63ffd88d3f8b48ba49f7de1c5971aaf3d48cf12817312907768ac063212a2651c19025"], 0xef) (async) write$binfmt_script(r10, &(0x7f00000003c0)=ANY=[@ANYBLOB="2321202e2f66696c65302f66696c653020267d5d405e2c3a40278e2f200020275c2e2e2e275b402f207929206d656d6f72792e6576656e7473002073797a30000a5dc783382d8c5ac86079b3b2cc148946339f58aaf6f30056bf499c9bae68f43044d0d652c3dba3bf9a58ce777416c0613914ea40a56676458a934807c304018891a8ee7169e6e507267a6f08de3424dff68bcaf9260822e80e2355fe35c531c641aa51a6e8cf3db361ac18aba7c3a91075bad721ded562c94a9dedee12776524cc2d79e1c831667b87b73489dd9d20ccd6310860bc47bbbf4e067d4e9c4aebd77cff5a3d46ce5a1406cad56127b72ef3409660a8e0d4218c16d6beb17efa63ffd88d3f8b48ba49f7de1c5971aaf3d48cf12817312907768ac063212a2651c19025"], 0xef) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x4010, r9, 0x5e5ba000) [ 2633.649100][T26552] bond1187: entered promiscuous mode [ 2633.655029][T26552] 8021q: adding VLAN 0 to HW filter on device bond1187 19:47:07 executing program 3: setsockopt$IPT_SO_SET_REPLACE(0xffffffffffffffff, 0x0, 0x40, &(0x7f0000000080)=@nat={'nat\x00', 0x1b, 0x5, 0x628, 0x0, 0xb8, 0xffffffff, 0x0, 0xb8, 0x590, 0x590, 0xffffffff, 0x590, 0x590, 0x5, &(0x7f0000000000), {[{{@ip={@multicast1, @empty, 0xffffffff, 0xffffff00, 'vcan0\x00', 'bond0\x00', {}, {0xff}, 0x11, 0x1, 0x8}, 0x0, 0x70, 0xb8}, @unspec=@DNAT1={0x48, 'DNAT\x00', 0x1, {0x4, @ipv4=@local, @ipv4=@multicast1, @port=0x4e20, @icmp_id=0x68}}}, {{@ip={@loopback, @rand_addr=0x64010102, 0x0, 0xffffff00, 'bridge0\x00', 'gretap0\x00', {}, {}, 0x2e, 0x1}, 0x0, 0x1b8, 0x1f0, 0x0, {}, [@common=@icmp={{0x28}, {0x3, "b201"}}, @common=@unspec=@comment={{0x120}}]}, @REDIRECT={0x38, 'REDIRECT\x00', 0x0, {0x1, {0x7, @rand_addr=0x64010102, @broadcast, @gre_key=0x8, @icmp_id=0x64}}}}, {{@ip={@local, @local, 0xff000000, 0xffffff00, 'veth1_vlan\x00', 'nicvf0\x00', {}, {}, 0x4, 0x3}, 0x0, 0x1d8, 0x210, 0x0, {}, [@common=@inet=@sctp={{0x148}, {[0x4e23, 0x4e24], [0x4e24, 0x4e24], [0x7, 0x4, 0x0, 0x2, 0x1, 0x2, 0x0, 0x0, 0x2, 0x1, 0x2, 0x9, 0x9, 0x80000001, 0x1000000, 0xafc, 0x6, 0x947b4a8, 0x5, 0xfff, 0x3360, 0x401, 0x3, 0xae5c, 0x2, 0x5, 
0x1f, 0x1, 0x8, 0x6, 0xfff, 0x8000, 0x9, 0x1ef9, 0x801a, 0x9, 0x8001, 0x4, 0x8001, 0x4, 0xc4, 0x3, 0x400, 0x1ff6, 0x81, 0x6, 0x7000, 0x6, 0x2, 0x8000, 0x1, 0x7fff, 0x7, 0x80, 0x3ff, 0x10001, 0xfffffff8, 0x556e, 0x3, 0x4, 0x3, 0x0, 0x3, 0x9], 0x1, [{0xb0, 0x1, 0x3}, {0xa9, 0x0, 0x4}, {0x0, 0x0, 0x20}, {0x1f, 0x6, 0x98}], 0x3, 0x3, 0x2}}, @common=@socket0={{0x20}}]}, @NETMAP={0x38, 'NETMAP\x00', 0x0, {0x1, {0x18, @multicast2, @multicast1, @gre_key=0x8, @gre_key=0x3}}}}, {{@ip={@rand_addr=0x64010102, @private=0xa010100, 0xff, 0xffffff00, 'team_slave_0\x00', 'nicvf0\x00', {0xff}, {}, 0x6, 0x1, 0x12}, 0x0, 0xa0, 0xd8, 0x0, {}, [@common=@addrtype={{0x30}, {0x800, 0x2, 0x1}}]}, @MASQUERADE={0x38, 'MASQUERADE\x00', 0x0, {0x1, {0xf, @empty, @remote, @port=0x4e20, @icmp_id=0x5}}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x688) [ 2633.878875][T26553] bond1187: (slave bridge1090): making interface the new active one [ 2633.906053][T26553] bridge1090: entered promiscuous mode 19:47:08 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x56120000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:08 executing program 3: r0 = socket$netlink(0x10, 0x3, 0xf) ioctl$FITRIM(r0, 0xc0185879, &(0x7f0000000000)={0x100, 0x46e8fdab, 0x1}) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) [ 2633.936490][T26553] bond1187: (slave bridge1090): Enslaving as an active interface with an up link [ 2633.958754][T26580] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:08 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='memory.events\x00', 0x26e1, 0x0) sendfile(r0, r0, &(0x7f00000000c0)=0x9, 0x8000000000000000) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='memory.events\x00', 0x7a05, 0x1700) ioctl$FS_IOC_SETFLAGS(r1, 0x40086602, &(0x7f0000000000)) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r3 = bpf$ITER_CREATE(0x21, &(0x7f0000000080), 0x8) (async, rerun: 32) openat$cgroup_ro(r2, &(0x7f0000000100)='blkio.bfq.time_recursive\x00', 0x0, 0x0) (async, rerun: 32) openat$cgroup_ro(r0, &(0x7f0000000040)='cgroup.kill\x00', 0x0, 0x0) (async, rerun: 64) write$cgroup_int(r2, &(0x7f0000000200), 0x806000) (rerun: 64) r4 = bpf$PROG_LOAD(0x5, &(0x7f00000013c0)={0x12, 0x10, &(0x7f0000001440)=@framed={{0x18, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x2}, [@alu={0x4, 0x0, 0xd, 0x8, 0x1, 0x18, 0x1}, @btf_id={0x18, 0xc, 0x3, 0x0, 0x2}, @jmp={0x5, 0x1, 0x1, 0x0, 0xa, 0xffffffffffffffaf, 0xfffffffffffffff0}, @call={0x85, 0x0, 0x0, 0x55}, @call={0x85, 0x0, 0x0, 0x8c}, @btf_id={0x18, 0x2, 0x3, 0x0, 0x1}, @cb_func={0x18, 0xb, 0x4, 0x0, 0xffffffffffffffff}, @cb_func={0x18, 0x2, 0x4, 0x0, 0x7}, @kfunc={0x85, 0x0, 0x2, 0x0, 0x5}]}, &(0x7f00000001c0)='GPL\x00', 0x2, 0x1000, &(0x7f00000003c0)=""/4096, 0x40f00, 0x16, '\x00', 0x0, 0x23, r0, 0x8, &(0x7f0000000240)={0x7, 0x3}, 0x8, 0x10, &(0x7f0000000280)={0x4, 0x10, 0x4, 0x7}, 0x10, 0xffffffffffffffff, r3, 0x0, &(0x7f0000000300)=[r2, 0xffffffffffffffff]}, 0x80) ioctl$EXT4_IOC_CLEAR_ES_CACHE(r4, 0x6628) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000380)={0x0, 0x0, 0xbe0f, 0x9ffffc}) 19:47:08 executing program 3: r0 = socket$netlink(0x10, 0x3, 0xf) ioctl$FITRIM(r0, 0xc0185879, &(0x7f0000000000)={0x100, 0x46e8fdab, 0x1}) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:47:08 executing program 0: unshare(0x6c060000) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xe, 0x8031, 0xffffffffffffffff, 0x0) pipe(&(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) vmsplice(r1, &(0x7f0000000080)=[{&(0x7f0000000180)="bad75e693175ff4ccfc4", 0xfffffec5}, {&(0x7f0000000300)="76df8cfefa7ddac22dcfcf9e00c693a47524fbfa60133f4bf9500ca94f344e4dd28a9ec9014befb0ea01a514ea8c7c66e5e88e77a778affc2a2030f7fbfa96fe6c14a0c75aee26637a81e3bcb693e7513645eb550c7adad4103ebae473f42372227fa59722490bdd9f2774cffe4bd181ffffffffffffffc3788e745965108c770cc26d1936bcc660d3ccbf2feaeb4603347ab0", 0x93}], 0x2, 0x8) close(r1) socket$nl_route(0x10, 0x3, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000001c0)={0x28, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @val={0xc, 0x99, {0x938, 0x78}}}}}, 0x28}}, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r5, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r5, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r6, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r7}, @void}}}, 0x1c}}, 0x0) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 
0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(0xffffffffffffffff, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r8, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r9}, @void}}}, 0x1c}}, 0x0) r10 = socket$nl_generic(0x10, 0x3, 0x10) r11 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r10, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r10, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r11, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r12}, @void}}}, 0x1c}}, 0x0) sendmsg$TIPC_NL_KEY_FLUSH(r5, &(0x7f0000000000)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000100)=ANY=[@ANYRES32, @ANYRESHEX=r12], 0x60}}, 0x8000) write(0xffffffffffffffff, &(0x7f0000000180)="220000001400256304000000000000040208031301000000080002", 0x1b) splice(r0, 0x0, r1, 0x0, 0x100000000, 0x0) r13 = socket$netlink(0x10, 0x3, 0x0) r14 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(0xffffffffffffffff, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r14, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r15}, @void}}}, 0x1c}}, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(0xffffffffffffffff, 0x40086607, &(0x7f0000000140)=0x6) writev(r13, &(0x7f00000003c0)=[{&(0x7f0000000180)="390000001300034700bb65e1c3e4ffff01000000010000005600000025000000190004000400000007fd17e5ffff0800040000000000000000", 0x39}], 0x1) [ 2634.184853][T26580] bond1226: entered promiscuous mode [ 2634.213039][T26580] 8021q: adding VLAN 0 to HW filter on device bond1226 [ 2634.508626][T26585] bond1226: (slave bridge1156): making interface the new active one [ 2634.534252][T26585] bridge1156: entered promiscuous mode [ 2634.561204][T26585] bond1226: (slave bridge1156): Enslaving as an active interface with an up link [ 2634.591069][T26586] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:47:08 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x57120000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:08 executing program 3: r0 = socket$netlink(0x10, 0x3, 0xf) ioctl$FITRIM(r0, 0xc0185879, &(0x7f0000000000)={0x100, 0x46e8fdab, 0x1}) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:47:08 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x658d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:08 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 
0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r9 = socket$nl_generic(0x10, 0x3, 0x10) r10 = syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) sendmsg$TIPC_CMD_RESET_LINK_STATS(r9, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000140)={0x30, r10, 0x1, 0x0, 0x0, {{}, {}, {0x3, 0x14, 'broadcast-link\x00'}}}, 0x30}}, 0x0) 19:47:08 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000000)=ANY=[@ANYBLOB="00000a7bd4f801ead0d4"], 0xa) [ 2634.635842][T26586] workqueue: Failed to create a rescuer kthread for wq "bond1111": -EINTR [ 2634.806174][T26604] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2634.899184][T26604] bond1188: entered promiscuous mode [ 2634.905090][T26604] 8021q: adding VLAN 0 to HW filter on device bond1188 19:47:09 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x57120000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2634.978179][T26605] bond1188: (slave bridge1091): making interface the new active one [ 2634.986377][T26605] bridge1091: entered promiscuous mode [ 2635.000001][T26605] bond1188: (slave bridge1091): Enslaving as an active interface with an up link 19:47:09 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) write$tun(0xffffffffffffffff, &(0x7f0000000000)=ANY=[@ANYBLOB="00000a7bd4f801ead0d4"], 0xa) [ 2635.123653][T26625] bond1227: entered promiscuous mode [ 2635.129372][T26625] 8021q: adding VLAN 0 to HW filter on device bond1227 [ 2635.276595][T26628] bond1227: (slave bridge1157): making interface the new active one [ 2635.286668][T26628] bridge1157: entered promiscuous mode [ 2635.301897][T26628] bond1227: (slave bridge1157): Enslaving as an active interface with an up link [ 2635.311650][T26631] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. 
19:47:09 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x60000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:09 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000000)=ANY=[@ANYBLOB="00000a7bd4f801ead0d4"], 0xa) [ 2635.329290][T26632] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2635.361089][T26634] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2635.476152][T26637] bond1189: entered promiscuous mode [ 2635.484295][T26637] 8021q: adding VLAN 0 to HW filter on device bond1189 19:47:09 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) (async) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) (async) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) (async) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) (async) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) (async) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) (async) socket$rxrpc(0x21, 0x2, 0xa) (async) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) (async) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) (async) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) (async) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, 
&(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) (async) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) (async) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r9 = socket$nl_generic(0x10, 0x3, 0x10) (async) r10 = syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) sendmsg$TIPC_CMD_RESET_LINK_STATS(r9, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000140)={0x30, r10, 0x1, 0x0, 0x0, {{}, {}, {0x3, 0x14, 'broadcast-link\x00'}}}, 0x30}}, 0x0) 19:47:09 executing program 3: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$xdp(0x2c, 0x3, 0x0) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r1, 0x8982, &(0x7f0000000040)={0x1, 'veth1_to_team\x00', {}, 0x3}) r2 = socket(0x10, 0x3, 0x0) r3 = socket$nl_route(0x10, 0x3, 0x0) r4 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f00000000c0), r4) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000000)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000200)=ANY=[@ANYBLOB="3c0000001000010400eeffff11ffffffff000000", @ANYRES32=r5, @ANYBLOB="01000000010000001c0012000c000100627269646765", @ANYRES8=r0], 0x3c}}, 0x0) sendmsg$nl_route_sched(r2, &(0x7f0000005840)={0x0, 0x0, &(0x7f00000005c0)={&(0x7f0000001240)=ANY=[@ANYBLOB="48000000240051860000000000ff000000000000", @ANYRES32=r5, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000240)=@newtfilter={0x338, 0x2c, 0xd27, 0x0, 0x0, {0x60, 0x0, 0x0, r5, {}, {}, {0xe}}, [@filter_kind_options=@f_basic={{0xa}, {0x308, 0x2, [@TCA_BASIC_EMATCHES={0x304, 0x2, 0x0, 0x1, [@TCA_EMATCH_TREE_LIST={0x30, 0x2, 0x0, 0x1, [@TCF_EM_IPT={0x2c, 0x1, 0x0, 0x0, {{0x0, 0x5}, [@TCA_EM_IPT_HOOK={0x8}, @TCA_EM_IPT_MATCH_NAME={0xb}, @TCA_EM_IPT_MATCH_DATA={0x4, 0x10}, @TCA_EM_IPT_HOOK={0x8}]}}]}, @TCA_EMATCH_TREE_LIST={0x2d0, 0x2, 0x0, 0x1, [@TCF_EM_META={0xe4, 0x3, 0x0, 0x0, {{0x40, 0x4, 0x1}, [@TCA_EM_META_RVALUE={0x18, 0x3, [@TCF_META_TYPE_VAR="8957b69a", @TCF_META_TYPE_INT=0x3, @TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_VAR="8b49d56d4d95ff51"]}, @TCA_EM_META_LVALUE={0x34, 0x2, [@TCF_META_TYPE_VAR="b95afb6a58937134f9ae", @TCF_META_TYPE_INT, @TCF_META_TYPE_INT=0x5, @TCF_META_TYPE_INT=0x7, 
@TCF_META_TYPE_INT=0x5, @TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_INT=0x6, @TCF_META_TYPE_VAR="16f1b5ed7dff08a110cd", @TCF_META_TYPE_INT=0x5]}, @TCA_EM_META_RVALUE={0x1d, 0x3, [@TCF_META_TYPE_VAR="22a18ba81cd821c8", @TCF_META_TYPE_INT=0x6, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_VAR="552bd9009ac6a3d940"]}, @TCA_EM_META_HDR={0xc, 0x1, {{0x7f, 0x80, 0x1}, {0x80, 0x9}}}, @TCA_EM_META_RVALUE={0xd, 0x3, [@TCF_META_TYPE_VAR="1fd87bad14b66df29f"]}, @TCA_EM_META_HDR={0xc, 0x1, {{0x4, 0x3, 0x2}, {0x4, 0x9, 0x2}}}, @TCA_EM_META_HDR={0xc, 0x1, {{0x100, 0xea, 0x2}, {0x2, 0x1, 0x3}}}, @TCA_EM_META_HDR={0xc, 0x1, {{0x0, 0x4, 0x2}, {0x8, 0x80, 0x2}}}, @TCA_EM_META_LVALUE={0x2b, 0x2, [@TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_INT=0x8, @TCF_META_TYPE_VAR="c9ba83989cf6", @TCF_META_TYPE_VAR="a2c7a29cb7", @TCF_META_TYPE_VAR="74af77934008", @TCF_META_TYPE_VAR="39676b98e6629763909e"]}]}}, @TCF_EM_CONTAINER={0x104, 0x3, 0x0, 0x0, {{0x593, 0x0, 0x9}, "a02a9bcb03e5effa10fa6f43ce24816b19a130f50ade84c5ae939968ecad378ff4a4cc0cb162759fec11d9134b5768537b1eb3e8c367be00060f6f9b9e42e18754587db1ccfb5d37cc37d4be5b36558623a3d745e18d95580416388b0ac9c21f6e25b8b5ac4a1c090b4cf0b433cff655c92b9c88a022392b68ede6058a8f2cf030cb3f16d2f60afdd81f2244455ddeef0beb0ce65afbad0b5ffc460e451e72ec033e3ded41e246d557162dad994b49f8592959bb10adc20410a8e310d3dfa9d3187efb0a1607297e9ccdbede68db55035906e98beefde43a4cb98041449a60aaf46f1ad027b13c48eea72d004878561162bd53c821a950"}}, @TCF_EM_IPT={0x1c, 0x1, 0x0, 0x0, {{0x200, 0x9, 0x100}, [@TCA_EM_IPT_MATCH_REVISION={0x5, 0x3, 0x1b}, @TCA_EM_IPT_HOOK={0x8, 0x1, 0x1}]}}, @TCF_EM_CMP={0x18, 0x3, 0x0, 0x0, {{0x6, 0x1, 0x100}, {0x3f, 0x1, 0x9, 0x1, 0x1, 0x0, 0x1}}}, @TCF_EM_CONTAINER={0xb0, 0x1, 0x0, 0x0, {{0x4, 0x0, 0x7}, "5141bccdc8f57f04e0b40c9f0816d5000c0c06579b5e74a388dd436e0cbf8037a3e25d226a155c2ad6c6cd9fe644979f7c97f67dc2190cf26353fe0505d8acc521ca26f35edde457492437d055d0b26f4fd8120647e91367733bddf11be0cecd74d88e10243ee6106deef50a06e19e1889e43d76a70e7e7960729fc72d431b90ac0431c8dfa660cc7d9c15820315242146bbe1c61767d4a6cc6ddc2cd84703f6ee99b756"}}]}]}]}}]}, 0x338}}, 0x0) syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000200080022eb07000000000000000200000000000000000000000800655800000000"], 0x0) [ 2635.677061][T26644] bond1189: (slave bridge1092): making interface the new active one [ 2635.704261][T26644] bridge1092: entered promiscuous mode [ 2635.749543][T26644] bond1189: (slave bridge1092): Enslaving as an active interface with an up link [ 2635.777218][T26643] validate_nla: 2 callbacks suppressed [ 2635.777242][T26643] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2635.874494][T26643] bond1111: entered promiscuous mode [ 2635.880213][T26643] 8021q: adding VLAN 0 to HW filter on device bond1111 [ 2636.036549][T26645] bond1111: (slave bridge1041): making interface the new active one [ 2636.049813][T26645] bridge1041: entered promiscuous mode [ 2636.061619][T26645] bond1111: (slave bridge1041): Enslaving as an active interface with an up link [ 2636.104384][T26650] netlink: 2 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2636.136261][T26655] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2636.222171][T26655] bond1228: entered promiscuous mode [ 2636.243909][T26655] 8021q: adding VLAN 0 to HW filter on device bond1228 [ 2636.275762][T26657] netlink: 'syz-executor.0': attribute type 4 has an invalid length. [ 2636.467453][T26660] bond1228: (slave bridge1158): making interface the new active one [ 2636.477964][T26660] bridge1158: entered promiscuous mode [ 2636.491914][T26660] bond1228: (slave bridge1158): Enslaving as an active interface with an up link [ 2636.545903][T26664] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2636.557943][T26665] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2636.582584][T26670] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2636.658155][T26671] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. 19:47:12 executing program 0: unshare(0x6c060000) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xe, 0x8031, 0xffffffffffffffff, 0x0) (async) pipe(&(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) vmsplice(r1, &(0x7f0000000080)=[{&(0x7f0000000180)="bad75e693175ff4ccfc4", 0xfffffec5}, {&(0x7f0000000300)="76df8cfefa7ddac22dcfcf9e00c693a47524fbfa60133f4bf9500ca94f344e4dd28a9ec9014befb0ea01a514ea8c7c66e5e88e77a778affc2a2030f7fbfa96fe6c14a0c75aee26637a81e3bcb693e7513645eb550c7adad4103ebae473f42372227fa59722490bdd9f2774cffe4bd181ffffffffffffffc3788e745965108c770cc26d1936bcc660d3ccbf2feaeb4603347ab0", 0x93}], 0x2, 0x8) (async) close(r1) (async) socket$nl_route(0x10, 0x3, 0x0) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000001c0)={0x28, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @val={0xc, 0x99, {0x938, 0x78}}}}}, 0x28}}, 0x0) (async) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r5, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r5, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r6, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r7}, @void}}}, 0x1c}}, 0x0) (async) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(0xffffffffffffffff, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r8, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r9}, @void}}}, 0x1c}}, 0x0) r10 = socket$nl_generic(0x10, 0x3, 0x10) r11 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r10, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r10, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r11, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r12}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$TIPC_NL_KEY_FLUSH(r5, &(0x7f0000000000)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000100)=ANY=[@ANYRES32, @ANYRESHEX=r12], 0x60}}, 0x8000) (async) write(0xffffffffffffffff, &(0x7f0000000180)="220000001400256304000000000000040208031301000000080002", 0x1b) (async) splice(r0, 0x0, r1, 0x0, 0x100000000, 0x0) (async) r13 = socket$netlink(0x10, 0x3, 0x0) (async) 
r14 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(0xffffffffffffffff, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r14, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r15}, @void}}}, 0x1c}}, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(0xffffffffffffffff, 0x40086607, &(0x7f0000000140)=0x6) (async) writev(r13, &(0x7f00000003c0)=[{&(0x7f0000000180)="390000001300034700bb65e1c3e4ffff01000000010000005600000025000000190004000400000007fd17e5ffff0800040000000000000000", 0x39}], 0x1) 19:47:12 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x59030000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:12 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x668d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:12 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x65580000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:12 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) (async) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 
0xffffff6a) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) (async) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) (async) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) (async) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) (async) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) (async) r9 = socket$nl_generic(0x10, 0x3, 0x10) (async) r10 = syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) sendmsg$TIPC_CMD_RESET_LINK_STATS(r9, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000140)={0x30, r10, 0x1, 0x0, 0x0, {{}, {}, {0x3, 0x14, 'broadcast-link\x00'}}}, 0x30}}, 0x0) 19:47:12 executing program 3: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$xdp(0x2c, 0x3, 0x0) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r1, 0x8982, &(0x7f0000000040)={0x1, 'veth1_to_team\x00', {}, 0x3}) (async) r2 = socket(0x10, 0x3, 0x0) (async) r3 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 64) r4 = socket(0x10, 0x803, 0x0) (rerun: 64) syz_genetlink_get_family_id$mptcp(&(0x7f00000000c0), r4) (async) 
getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000000)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000200)=ANY=[@ANYBLOB="3c0000001000010400eeffff11ffffffff000000", @ANYRES32=r5, @ANYBLOB="01000000010000001c0012000c000100627269646765", @ANYRES8=r0], 0x3c}}, 0x0) (async) sendmsg$nl_route_sched(r2, &(0x7f0000005840)={0x0, 0x0, &(0x7f00000005c0)={&(0x7f0000001240)=ANY=[@ANYBLOB="48000000240051860000000000ff000000000000", @ANYRES32=r5, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000240)=@newtfilter={0x338, 0x2c, 0xd27, 0x0, 0x0, {0x60, 0x0, 0x0, r5, {}, {}, {0xe}}, [@filter_kind_options=@f_basic={{0xa}, {0x308, 0x2, [@TCA_BASIC_EMATCHES={0x304, 0x2, 0x0, 0x1, [@TCA_EMATCH_TREE_LIST={0x30, 0x2, 0x0, 0x1, [@TCF_EM_IPT={0x2c, 0x1, 0x0, 0x0, {{0x0, 0x5}, [@TCA_EM_IPT_HOOK={0x8}, @TCA_EM_IPT_MATCH_NAME={0xb}, @TCA_EM_IPT_MATCH_DATA={0x4, 0x10}, @TCA_EM_IPT_HOOK={0x8}]}}]}, @TCA_EMATCH_TREE_LIST={0x2d0, 0x2, 0x0, 0x1, [@TCF_EM_META={0xe4, 0x3, 0x0, 0x0, {{0x40, 0x4, 0x1}, [@TCA_EM_META_RVALUE={0x18, 0x3, [@TCF_META_TYPE_VAR="8957b69a", @TCF_META_TYPE_INT=0x3, @TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_VAR="8b49d56d4d95ff51"]}, @TCA_EM_META_LVALUE={0x34, 0x2, [@TCF_META_TYPE_VAR="b95afb6a58937134f9ae", @TCF_META_TYPE_INT, @TCF_META_TYPE_INT=0x5, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_INT=0x5, @TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_INT=0x6, @TCF_META_TYPE_VAR="16f1b5ed7dff08a110cd", @TCF_META_TYPE_INT=0x5]}, @TCA_EM_META_RVALUE={0x1d, 0x3, [@TCF_META_TYPE_VAR="22a18ba81cd821c8", @TCF_META_TYPE_INT=0x6, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_VAR="552bd9009ac6a3d940"]}, @TCA_EM_META_HDR={0xc, 0x1, {{0x7f, 0x80, 0x1}, {0x80, 0x9}}}, @TCA_EM_META_RVALUE={0xd, 0x3, [@TCF_META_TYPE_VAR="1fd87bad14b66df29f"]}, @TCA_EM_META_HDR={0xc, 0x1, {{0x4, 0x3, 0x2}, {0x4, 0x9, 0x2}}}, @TCA_EM_META_HDR={0xc, 0x1, {{0x100, 0xea, 0x2}, {0x2, 0x1, 0x3}}}, @TCA_EM_META_HDR={0xc, 0x1, {{0x0, 0x4, 0x2}, {0x8, 0x80, 0x2}}}, @TCA_EM_META_LVALUE={0x2b, 0x2, [@TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_INT=0x8, @TCF_META_TYPE_VAR="c9ba83989cf6", @TCF_META_TYPE_VAR="a2c7a29cb7", @TCF_META_TYPE_VAR="74af77934008", @TCF_META_TYPE_VAR="39676b98e6629763909e"]}]}}, @TCF_EM_CONTAINER={0x104, 0x3, 0x0, 0x0, {{0x593, 0x0, 0x9}, "a02a9bcb03e5effa10fa6f43ce24816b19a130f50ade84c5ae939968ecad378ff4a4cc0cb162759fec11d9134b5768537b1eb3e8c367be00060f6f9b9e42e18754587db1ccfb5d37cc37d4be5b36558623a3d745e18d95580416388b0ac9c21f6e25b8b5ac4a1c090b4cf0b433cff655c92b9c88a022392b68ede6058a8f2cf030cb3f16d2f60afdd81f2244455ddeef0beb0ce65afbad0b5ffc460e451e72ec033e3ded41e246d557162dad994b49f8592959bb10adc20410a8e310d3dfa9d3187efb0a1607297e9ccdbede68db55035906e98beefde43a4cb98041449a60aaf46f1ad027b13c48eea72d004878561162bd53c821a950"}}, @TCF_EM_IPT={0x1c, 0x1, 0x0, 0x0, {{0x200, 0x9, 0x100}, [@TCA_EM_IPT_MATCH_REVISION={0x5, 0x3, 0x1b}, @TCA_EM_IPT_HOOK={0x8, 0x1, 0x1}]}}, @TCF_EM_CMP={0x18, 0x3, 0x0, 0x0, {{0x6, 0x1, 0x100}, {0x3f, 0x1, 0x9, 0x1, 0x1, 0x0, 0x1}}}, @TCF_EM_CONTAINER={0xb0, 0x1, 0x0, 0x0, {{0x4, 0x0, 0x7}, 
"5141bccdc8f57f04e0b40c9f0816d5000c0c06579b5e74a388dd436e0cbf8037a3e25d226a155c2ad6c6cd9fe644979f7c97f67dc2190cf26353fe0505d8acc521ca26f35edde457492437d055d0b26f4fd8120647e91367733bddf11be0cecd74d88e10243ee6106deef50a06e19e1889e43d76a70e7e7960729fc72d431b90ac0431c8dfa660cc7d9c15820315242146bbe1c61767d4a6cc6ddc2cd84703f6ee99b756"}}]}]}]}}]}, 0x338}}, 0x0) syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000200080022eb07000000000000000200000000000000000000000800655800000000"], 0x0) [ 2638.415203][T26684] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:47:12 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, 
{0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r9 = socket$nl_generic(0x10, 0x3, 0x10) r10 = syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) sendmsg$TIPC_CMD_RESET_LINK_STATS(r9, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000140)={0x30, r10, 0x1, 0x0, 0x0, {{}, {}, {0x3, 0x14, 'broadcast-link\x00'}}}, 0x30}}, 0x0) [ 2638.560554][T26684] bond1229: entered promiscuous mode [ 2638.596448][T26684] 8021q: adding VLAN 0 to HW filter on device bond1229 [ 2638.679965][T26687] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:12 executing program 3: r0 = socket$nl_route(0x10, 0x3, 0x0) (async) r1 = socket$xdp(0x2c, 0x3, 0x0) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r1, 0x8982, &(0x7f0000000040)={0x1, 'veth1_to_team\x00', {}, 0x3}) (async) r2 = socket(0x10, 0x3, 0x0) (async) r3 = socket$nl_route(0x10, 0x3, 0x0) (async) r4 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f00000000c0), r4) (async, rerun: 32) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (rerun: 32) sendmsg$nl_route(r3, &(0x7f0000000000)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000200)=ANY=[@ANYBLOB="3c0000001000010400eeffff11ffffffff000000", @ANYRES32=r5, @ANYBLOB="01000000010000001c0012000c000100627269646765", @ANYRES8=r0], 0x3c}}, 0x0) (async, rerun: 64) sendmsg$nl_route_sched(r2, &(0x7f0000005840)={0x0, 0x0, &(0x7f00000005c0)={&(0x7f0000001240)=ANY=[@ANYBLOB="48000000240051860000000000ff000000000000", @ANYRES32=r5, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) (async, rerun: 64) sendmsg$nl_route_sched(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000240)=@newtfilter={0x338, 0x2c, 0xd27, 0x0, 0x0, {0x60, 0x0, 0x0, r5, {}, {}, {0xe}}, [@filter_kind_options=@f_basic={{0xa}, {0x308, 0x2, [@TCA_BASIC_EMATCHES={0x304, 0x2, 0x0, 0x1, [@TCA_EMATCH_TREE_LIST={0x30, 0x2, 0x0, 0x1, [@TCF_EM_IPT={0x2c, 0x1, 0x0, 0x0, {{0x0, 0x5}, [@TCA_EM_IPT_HOOK={0x8}, @TCA_EM_IPT_MATCH_NAME={0xb}, @TCA_EM_IPT_MATCH_DATA={0x4, 0x10}, @TCA_EM_IPT_HOOK={0x8}]}}]}, @TCA_EMATCH_TREE_LIST={0x2d0, 0x2, 0x0, 0x1, [@TCF_EM_META={0xe4, 0x3, 0x0, 0x0, {{0x40, 0x4, 0x1}, [@TCA_EM_META_RVALUE={0x18, 0x3, [@TCF_META_TYPE_VAR="8957b69a", @TCF_META_TYPE_INT=0x3, @TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_VAR="8b49d56d4d95ff51"]}, @TCA_EM_META_LVALUE={0x34, 0x2, [@TCF_META_TYPE_VAR="b95afb6a58937134f9ae", @TCF_META_TYPE_INT, @TCF_META_TYPE_INT=0x5, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_INT=0x5, @TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_INT=0x6, @TCF_META_TYPE_VAR="16f1b5ed7dff08a110cd", @TCF_META_TYPE_INT=0x5]}, @TCA_EM_META_RVALUE={0x1d, 0x3, [@TCF_META_TYPE_VAR="22a18ba81cd821c8", @TCF_META_TYPE_INT=0x6, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_VAR="552bd9009ac6a3d940"]}, @TCA_EM_META_HDR={0xc, 0x1, {{0x7f, 0x80, 0x1}, {0x80, 0x9}}}, @TCA_EM_META_RVALUE={0xd, 0x3, [@TCF_META_TYPE_VAR="1fd87bad14b66df29f"]}, @TCA_EM_META_HDR={0xc, 0x1, {{0x4, 0x3, 0x2}, {0x4, 0x9, 0x2}}}, @TCA_EM_META_HDR={0xc, 0x1, {{0x100, 0xea, 0x2}, {0x2, 0x1, 0x3}}}, @TCA_EM_META_HDR={0xc, 0x1, {{0x0, 0x4, 0x2}, {0x8, 0x80, 0x2}}}, @TCA_EM_META_LVALUE={0x2b, 0x2, [@TCF_META_TYPE_INT=0x2, @TCF_META_TYPE_INT=0x7, @TCF_META_TYPE_INT=0x8, @TCF_META_TYPE_VAR="c9ba83989cf6", @TCF_META_TYPE_VAR="a2c7a29cb7", @TCF_META_TYPE_VAR="74af77934008", 
@TCF_META_TYPE_VAR="39676b98e6629763909e"]}]}}, @TCF_EM_CONTAINER={0x104, 0x3, 0x0, 0x0, {{0x593, 0x0, 0x9}, "a02a9bcb03e5effa10fa6f43ce24816b19a130f50ade84c5ae939968ecad378ff4a4cc0cb162759fec11d9134b5768537b1eb3e8c367be00060f6f9b9e42e18754587db1ccfb5d37cc37d4be5b36558623a3d745e18d95580416388b0ac9c21f6e25b8b5ac4a1c090b4cf0b433cff655c92b9c88a022392b68ede6058a8f2cf030cb3f16d2f60afdd81f2244455ddeef0beb0ce65afbad0b5ffc460e451e72ec033e3ded41e246d557162dad994b49f8592959bb10adc20410a8e310d3dfa9d3187efb0a1607297e9ccdbede68db55035906e98beefde43a4cb98041449a60aaf46f1ad027b13c48eea72d004878561162bd53c821a950"}}, @TCF_EM_IPT={0x1c, 0x1, 0x0, 0x0, {{0x200, 0x9, 0x100}, [@TCA_EM_IPT_MATCH_REVISION={0x5, 0x3, 0x1b}, @TCA_EM_IPT_HOOK={0x8, 0x1, 0x1}]}}, @TCF_EM_CMP={0x18, 0x3, 0x0, 0x0, {{0x6, 0x1, 0x100}, {0x3f, 0x1, 0x9, 0x1, 0x1, 0x0, 0x1}}}, @TCF_EM_CONTAINER={0xb0, 0x1, 0x0, 0x0, {{0x4, 0x0, 0x7}, "5141bccdc8f57f04e0b40c9f0816d5000c0c06579b5e74a388dd436e0cbf8037a3e25d226a155c2ad6c6cd9fe644979f7c97f67dc2190cf26353fe0505d8acc521ca26f35edde457492437d055d0b26f4fd8120647e91367733bddf11be0cecd74d88e10243ee6106deef50a06e19e1889e43d76a70e7e7960729fc72d431b90ac0431c8dfa660cc7d9c15820315242146bbe1c61767d4a6cc6ddc2cd84703f6ee99b756"}}]}]}]}}]}, 0x338}}, 0x0) syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000200080022eb07000000000000000200000000000000000000000800655800000000"], 0x0) [ 2638.766777][T26687] bond1112: entered promiscuous mode [ 2638.773469][T26687] 8021q: adding VLAN 0 to HW filter on device bond1112 [ 2638.792004][T26688] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2638.995587][T26688] bond1190: entered promiscuous mode [ 2639.016534][T26688] 8021q: adding VLAN 0 to HW filter on device bond1190 19:47:13 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x65e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2639.123170][T26699] bond1229: (slave bridge1159): making interface the new active one [ 2639.134048][T26699] bridge1159: entered promiscuous mode [ 2639.151200][T26699] bond1229: (slave bridge1159): Enslaving as an active interface with an up link [ 2639.272058][T26700] bond1112: (slave bridge1042): making interface the new active one [ 2639.287159][T26700] bridge1042: entered promiscuous mode [ 2639.306066][T26700] bond1112: (slave bridge1042): Enslaving as an active interface with an up link 19:47:13 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 
0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x678d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:13 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x60000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2639.432365][T26702] bond1190: (slave bridge1093): making interface the new active one [ 2639.447066][T26702] bridge1093: entered promiscuous mode [ 2639.460549][T26702] bond1190: (slave bridge1093): Enslaving as an active interface with an up link [ 2639.527280][T26707] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2639.548480][T26709] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. 19:47:13 executing program 3: getsockopt$inet_sctp_SCTP_GET_ASSOC_STATS(0xffffffffffffffff, 0x84, 0x70, &(0x7f0000000180)={0x0, @in6={{0xa, 0x4e22, 0x2, @local, 0x1}}, [0x8, 0x10000, 0x7fffffff, 0x9, 0x1, 0x5, 0x6, 0x5, 0x1, 0x8, 0x7f, 0x7fffffffffffffff, 0x8000000000000000, 0x4, 0xffffffffffffffff]}, &(0x7f0000000000)=0x100) ioctl$EXT4_IOC_GETSTATE(0xffffffffffffffff, 0x40046629, &(0x7f0000000040)) sendmsg$NL80211_CMD_RELOAD_REGDB(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000080), 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x14, 0x0, 0x20, 0x70bd2d, 0x25dfdbfc, {}, ["", ""]}, 0x14}}, 0x4000) [ 2639.596438][T26722] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2639.701655][T26722] bond1230: entered promiscuous mode [ 2639.708589][T26722] 8021q: adding VLAN 0 to HW filter on device bond1230 [ 2639.791521][T26724] bond1230: (slave bridge1160): making interface the new active one [ 2639.801605][T26724] bridge1160: entered promiscuous mode [ 2639.818131][T26724] bond1230: (slave bridge1160): Enslaving as an active interface with an up link [ 2639.842173][T26726] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2639.905032][T26726] bond1113: entered promiscuous mode [ 2639.910810][T26726] 8021q: adding VLAN 0 to HW filter on device bond1113 [ 2639.988018][T26727] bond1113: (slave bridge1043): making interface the new active one [ 2639.999352][T26727] bridge1043: entered promiscuous mode [ 2640.011681][T26727] bond1113: (slave bridge1043): Enslaving as an active interface with an up link [ 2640.021492][T26729] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2640.162241][T26729] bond1191: entered promiscuous mode [ 2640.168686][T26729] 8021q: adding VLAN 0 to HW filter on device bond1191 [ 2640.233310][T26730] bond1191: (slave bridge1094): making interface the new active one [ 2640.241391][T26730] bridge1094: entered promiscuous mode [ 2640.251959][T26730] bond1191: (slave bridge1094): Enslaving as an active interface with an up link 19:47:17 executing program 0: unshare(0x6c060000) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0xe, 0x8031, 0xffffffffffffffff, 0x0) (async) pipe(&(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) vmsplice(r1, &(0x7f0000000080)=[{&(0x7f0000000180)="bad75e693175ff4ccfc4", 0xfffffec5}, {&(0x7f0000000300)="76df8cfefa7ddac22dcfcf9e00c693a47524fbfa60133f4bf9500ca94f344e4dd28a9ec9014befb0ea01a514ea8c7c66e5e88e77a778affc2a2030f7fbfa96fe6c14a0c75aee26637a81e3bcb693e7513645eb550c7adad4103ebae473f42372227fa59722490bdd9f2774cffe4bd181ffffffffffffffc3788e745965108c770cc26d1936bcc660d3ccbf2feaeb4603347ab0", 0x93}], 0x2, 0x8) close(r1) (async) socket$nl_route(0x10, 0x3, 0x0) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000001c0)={0x28, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @val={0xc, 0x99, {0x938, 0x78}}}}}, 0x28}}, 0x0) (async) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async, rerun: 32) ioctl$sock_SIOCGIFINDEX_80211(r5, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) (rerun: 32) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r5, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r6, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r7}, @void}}}, 0x1c}}, 0x0) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(0xffffffffffffffff, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r8, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r9}, @void}}}, 0x1c}}, 0x0) (async) r10 = socket$nl_generic(0x10, 0x3, 0x10) r11 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r10, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r10, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r11, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r12}, @void}}}, 0x1c}}, 0x0) (async, rerun: 64) sendmsg$TIPC_NL_KEY_FLUSH(r5, &(0x7f0000000000)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000100)=ANY=[@ANYRES32, @ANYRESHEX=r12], 0x60}}, 0x8000) (async, rerun: 64) write(0xffffffffffffffff, &(0x7f0000000180)="220000001400256304000000000000040208031301000000080002", 0x1b) splice(r0, 0x0, r1, 0x0, 0x100000000, 0x0) (async) r13 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 64) r14 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async, rerun: 64) ioctl$sock_SIOCGIFINDEX_80211(0xffffffffffffffff, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(0xffffffffffffffff, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r14, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r15}, @void}}}, 0x1c}}, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(0xffffffffffffffff, 
0x40086607, &(0x7f0000000140)=0x6) (async) writev(r13, &(0x7f00000003c0)=[{&(0x7f0000000180)="390000001300034700bb65e1c3e4ffff01000000010000005600000025000000190004000400000007fd17e5ffff0800040000000000000000", 0x39}], 0x1) 19:47:17 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r9 = socket$nl_generic(0x10, 0x3, 0x10) r10 = syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) sendmsg$TIPC_CMD_RESET_LINK_STATS(r9, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000140)={0x30, r10, 0x1, 0x0, 0x0, {{}, {}, {0x3, 0x14, 
'broadcast-link\x00'}}}, 0x30}}, 0x0) 19:47:17 executing program 3: getsockopt$inet_sctp_SCTP_GET_ASSOC_STATS(0xffffffffffffffff, 0x84, 0x70, &(0x7f0000000180)={0x0, @in6={{0xa, 0x4e22, 0x2, @local, 0x1}}, [0x8, 0x10000, 0x7fffffff, 0x9, 0x1, 0x5, 0x6, 0x5, 0x1, 0x8, 0x7f, 0x7fffffffffffffff, 0x8000000000000000, 0x4, 0xffffffffffffffff]}, &(0x7f0000000000)=0x100) ioctl$EXT4_IOC_GETSTATE(0xffffffffffffffff, 0x40046629, &(0x7f0000000040)) sendmsg$NL80211_CMD_RELOAD_REGDB(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000080), 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x14, 0x0, 0x20, 0x70bd2d, 0x25dfdbfc, {}, ["", ""]}, 0x14}}, 0x4000) 19:47:17 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x68000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:17 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x66e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:17 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x60150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:17 executing program 3: getsockopt$inet_sctp_SCTP_GET_ASSOC_STATS(0xffffffffffffffff, 0x84, 0x70, &(0x7f0000000180)={0x0, @in6={{0xa, 0x4e22, 0x2, @local, 0x1}}, [0x8, 0x10000, 0x7fffffff, 0x9, 0x1, 0x5, 0x6, 0x5, 0x1, 0x8, 0x7f, 0x7fffffffffffffff, 0x8000000000000000, 0x4, 0xffffffffffffffff]}, &(0x7f0000000000)=0x100) 
ioctl$EXT4_IOC_GETSTATE(0xffffffffffffffff, 0x40046629, &(0x7f0000000040)) sendmsg$NL80211_CMD_RELOAD_REGDB(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000080), 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x14, 0x0, 0x20, 0x70bd2d, 0x25dfdbfc, {}, ["", ""]}, 0x14}}, 0x4000) getsockopt$inet_sctp_SCTP_GET_ASSOC_STATS(0xffffffffffffffff, 0x84, 0x70, &(0x7f0000000180)={0x0, @in6={{0xa, 0x4e22, 0x2, @local, 0x1}}, [0x8, 0x10000, 0x7fffffff, 0x9, 0x1, 0x5, 0x6, 0x5, 0x1, 0x8, 0x7f, 0x7fffffffffffffff, 0x8000000000000000, 0x4, 0xffffffffffffffff]}, &(0x7f0000000000)=0x100) (async) ioctl$EXT4_IOC_GETSTATE(0xffffffffffffffff, 0x40046629, &(0x7f0000000040)) (async) sendmsg$NL80211_CMD_RELOAD_REGDB(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000080), 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x14, 0x0, 0x20, 0x70bd2d, 0x25dfdbfc, {}, ["", ""]}, 0x14}}, 0x4000) (async) [ 2643.187759][T26742] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:47:17 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRES32=r0], 0x0) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000040)={r0}) r4 = socket$nl_generic(0x10, 0x3, 0x10) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendmsg$NL80211_CMD_DEAUTHENTICATE(r4, &(0x7f0000000400)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000003c0)={&(0x7f00000002c0)={0xb8, r1, 0x4, 0x70bd2c, 0x7, {{}, {@void, @void}}, [@NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_IE={0x92, 0x2a, [@mic={0x8c, 0x18, {0xa25, "460500985c68", @long="25a4f144b238276d4f3456b7df1aabb5"}}, @erp={0x2a, 0x1, {0x0, 0x0, 0x1}}, @rann={0x7e, 0x15, {{0x1, 0x4}, 0x1f, 0xfe, @device_b, 0x42379d3e, 0x0, 0x10000}}, @preq={0x82, 0x51, @not_ext={{0x0, 0x1, 0x1}, 0x7f, 0x81, 0x4, @device_a, 0x4, "", 0xce, 0x5, 0x5, [{{0x0, 0x0, 0x1}, @device_a, 0x5}, {{}, @device_b, 0x401}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}, {{0x1}, @device_b, 0x2}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}]}}, @challenge={0x10, 0x1, 0x1c}, @ibss={0x6, 0x2, 0x5}]}, @NL80211_ATTR_SSID={0x4}]}, 0xb8}, 0x1, 0x0, 0x0, 0x8c0}, 0x1) r7 = syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000100)={'wlan1\x00', 0x0}) r9 = socket$inet_sctp(0x2, 0xed0e0e6392dba1f, 0x84) ioctl$sock_inet_SIOCGIFDSTADDR(r9, 0x8917, &(0x7f0000000440)={'veth0_virt_wifi\x00', {0x2, 0x0, @initdev}}) sendmsg$NL80211_CMD_SET_MCAST_RATE(r3, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x40, r7, 0x800, 0x70bd2b, 0x25dfdbff, {{}, {@val={0x8, 0x3, r8}, @val={0xc, 0x99, {0x4, 0x57}}}}, [@NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}, @NL80211_ATTR_MCAST_RATE={0x8}, @NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}]}, 0x40}, 0x1, 0x0, 0x0, 0x20014094}, 0x890) [ 
2643.345241][T26742] bond1231: entered promiscuous mode [ 2643.378846][T26742] 8021q: adding VLAN 0 to HW filter on device bond1231 [ 2643.425191][T26746] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2643.574036][T26746] bond1192: entered promiscuous mode [ 2643.613876][T26746] 8021q: adding VLAN 0 to HW filter on device bond1192 [ 2643.642937][T26748] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2643.762443][T26748] bond1114: entered promiscuous mode [ 2643.780349][T26748] 8021q: adding VLAN 0 to HW filter on device bond1114 [ 2643.956819][T26753] bond1231: (slave bridge1161): making interface the new active one [ 2643.975017][T26753] bridge1161: entered promiscuous mode 19:47:18 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x67e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2644.001302][T26753] bond1231: (slave bridge1161): Enslaving as an active interface with an up link 19:47:18 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x688d0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2644.123145][T26756] bond1114: (slave bridge1044): making interface the new active one [ 2644.132598][T26756] bridge1044: entered promiscuous mode [ 2644.150148][T26756] bond1114: (slave bridge1044): Enslaving as an active interface with an up link [ 2644.269168][T26755] bond1192: (slave bridge1095): making interface the new active one [ 2644.278770][T26755] bridge1095: entered promiscuous mode [ 2644.304925][T26755] bond1192: (slave bridge1095): Enslaving as an active interface with an up link 19:47:18 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x61150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2644.323031][T26749] __nla_validate_parse: 2 callbacks suppressed [ 2644.323056][T26749] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2644.359949][T26761] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2644.428425][T26763] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2644.439633][T26775] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:47:18 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRES32=r0], 0x0) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000040)={r0}) r4 = socket$nl_generic(0x10, 0x3, 0x10) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendmsg$NL80211_CMD_DEAUTHENTICATE(r4, &(0x7f0000000400)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000003c0)={&(0x7f00000002c0)={0xb8, r1, 0x4, 0x70bd2c, 0x7, {{}, {@void, @void}}, [@NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_IE={0x92, 0x2a, [@mic={0x8c, 0x18, {0xa25, "460500985c68", @long="25a4f144b238276d4f3456b7df1aabb5"}}, @erp={0x2a, 0x1, {0x0, 0x0, 0x1}}, @rann={0x7e, 0x15, {{0x1, 0x4}, 0x1f, 0xfe, @device_b, 0x42379d3e, 0x0, 0x10000}}, @preq={0x82, 0x51, @not_ext={{0x0, 0x1, 0x1}, 0x7f, 0x81, 0x4, @device_a, 0x4, "", 0xce, 0x5, 0x5, [{{0x0, 0x0, 0x1}, @device_a, 0x5}, {{}, @device_b, 0x401}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}, {{0x1}, @device_b, 0x2}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}]}}, @challenge={0x10, 0x1, 0x1c}, @ibss={0x6, 0x2, 0x5}]}, @NL80211_ATTR_SSID={0x4}]}, 0xb8}, 0x1, 0x0, 0x0, 0x8c0}, 0x1) r7 = syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000100)={'wlan1\x00', 0x0}) r9 = socket$inet_sctp(0x2, 0xed0e0e6392dba1f, 0x84) ioctl$sock_inet_SIOCGIFDSTADDR(r9, 0x8917, &(0x7f0000000440)={'veth0_virt_wifi\x00', {0x2, 0x0, @initdev}}) sendmsg$NL80211_CMD_SET_MCAST_RATE(r3, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x40, r7, 0x800, 0x70bd2b, 0x25dfdbff, {{}, {@val={0x8, 0x3, r8}, @val={0xc, 0x99, {0x4, 0x57}}}}, [@NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}, @NL80211_ATTR_MCAST_RATE={0x8}, @NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}]}, 0x40}, 0x1, 0x0, 0x0, 0x20014094}, 0x890) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 
0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async) syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRES32=r0], 0x0) (async) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000040)={r0}) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_DEAUTHENTICATE(r4, &(0x7f0000000400)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000003c0)={&(0x7f00000002c0)={0xb8, r1, 0x4, 0x70bd2c, 0x7, {{}, {@void, @void}}, [@NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_IE={0x92, 0x2a, [@mic={0x8c, 0x18, {0xa25, "460500985c68", @long="25a4f144b238276d4f3456b7df1aabb5"}}, @erp={0x2a, 0x1, {0x0, 0x0, 0x1}}, @rann={0x7e, 0x15, {{0x1, 0x4}, 0x1f, 0xfe, @device_b, 0x42379d3e, 0x0, 0x10000}}, @preq={0x82, 0x51, @not_ext={{0x0, 0x1, 0x1}, 0x7f, 0x81, 0x4, @device_a, 0x4, "", 0xce, 0x5, 0x5, [{{0x0, 0x0, 0x1}, @device_a, 0x5}, {{}, @device_b, 0x401}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}, {{0x1}, @device_b, 0x2}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}]}}, @challenge={0x10, 0x1, 0x1c}, @ibss={0x6, 0x2, 0x5}]}, @NL80211_ATTR_SSID={0x4}]}, 0xb8}, 0x1, 0x0, 0x0, 0x8c0}, 0x1) (async) syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000100)={'wlan1\x00'}) (async) socket$inet_sctp(0x2, 0xed0e0e6392dba1f, 0x84) (async) ioctl$sock_inet_SIOCGIFDSTADDR(r9, 0x8917, &(0x7f0000000440)={'veth0_virt_wifi\x00', {0x2, 0x0, @initdev}}) (async) sendmsg$NL80211_CMD_SET_MCAST_RATE(r3, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x40, r7, 0x800, 0x70bd2b, 0x25dfdbff, {{}, {@val={0x8, 0x3, r8}, @val={0xc, 0x99, {0x4, 0x57}}}}, [@NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}, @NL80211_ATTR_MCAST_RATE={0x8}, @NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}]}, 0x40}, 0x1, 0x0, 0x0, 0x20014094}, 0x890) (async) [ 2644.541134][T26775] bond1232: entered promiscuous mode [ 2644.547019][T26775] 8021q: adding VLAN 0 to HW filter on device bond1232 [ 2644.645449][T26776] bond1232: (slave bridge1162): making interface the new active one [ 2644.653698][T26776] bridge1162: entered promiscuous mode [ 2644.665618][T26776] bond1232: (slave bridge1162): Enslaving as an active interface with an up link [ 2644.677743][T26779] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2644.772287][T26779] bond1115: entered promiscuous mode [ 2644.782864][T26779] 8021q: adding VLAN 0 to HW filter on device bond1115 [ 2644.880988][T26781] bond1115: (slave bridge1045): making interface the new active one [ 2644.890344][T26781] bridge1045: entered promiscuous mode [ 2644.901425][T26781] bond1115: (slave bridge1045): Enslaving as an active interface with an up link [ 2644.933284][T26783] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2645.042854][T26783] bond1193: entered promiscuous mode [ 2645.055224][T26783] 8021q: adding VLAN 0 to HW filter on device bond1193 [ 2645.190240][T26785] bond1193: (slave bridge1096): making interface the new active one [ 2645.199330][T26785] bridge1096: entered promiscuous mode [ 2645.212690][T26785] bond1193: (slave bridge1096): Enslaving as an active interface with an up link 19:47:21 executing program 0: r0 = socket$inet(0x2, 0x4000000000000001, 0x0) setsockopt$inet_tcp_int(r0, 0x6, 0x19, &(0x7f00000000c0)=0x7b, 0x4) bind$inet(r0, &(0x7f0000000000)={0x2, 0x4e23, @broadcast}, 0x10) bpf$MAP_UPDATE_ELEM(0x2, &(0x7f00000001c0)={0x1, &(0x7f0000000080)="4b8a3c14f2eae10a", &(0x7f0000000100)=@buf="042605de0465c7547c9c3bf2344ce68f0291148ded19280e3ea38a181750f28d0a36e9f50980b422653811db7df98a5b9444fbdb527d89e3b63f0c317143aa93d4e35cbd9cfe598b55620ec674da1d4ccf6ffc7bd0139a95475cbcb6a51e1c878b1b8cd748ae15c529b4b0f7325d14bf562e44d1e52dc63b3b483b0ee75bece7429b89672571061044978248893ebe808cb0f0b0f5efb7d68be3999d4b57a6003935356a7643bb86332b55a79b808a651167a4141ff1cf8d", 0x2}, 0x20) setsockopt$inet_tcp_int(r0, 0x6, 0xa, &(0x7f00000025c0)=0xe7d, 0x4) sendto$inet(r0, 0x0, 0x0, 0x200007fd, &(0x7f0000000040)={0x2, 0x4e23, @local}, 0x10) setsockopt$sock_int(r0, 0x1, 0x8, &(0x7f0000000600), 0x4) sendto$inet(r0, &(0x7f00000012c0)="0c268a927f1f6588b967481241ba78600a34f65ac618ded8974895abeaf4b4834ff922b3f1e0b02bd67aa03859bcecc7a95425a3a07e758044ab4ea6f7ae55d88fecf90b1a7511bf746bec66ba", 0xfe6a, 0x11, 0x0, 0x27) recvmsg(r0, &(0x7f0000001500)={0x0, 0xa, &(0x7f0000002200)=[{&(0x7f00000035c0)=""/4106, 0x200045ca}], 0x2, 0x0, 0x46, 0x40024d}, 0x100) 19:47:21 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r9 = socket$nl_generic(0x10, 0x3, 0x10) r10 = syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) sendmsg$TIPC_CMD_RESET_LINK_STATS(r9, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000140)={0x30, r10, 0x1, 0x0, 0x0, {{}, {}, {0x3, 0x14, 'broadcast-link\x00'}}}, 0x30}}, 0x0) 19:47:21 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x68000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:21 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6c000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:21 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x62150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:21 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async) syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRES32=r0], 0x0) (async, rerun: 64) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000040)={r0}) (rerun: 64) r4 = socket$nl_generic(0x10, 0x3, 0x10) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendmsg$NL80211_CMD_DEAUTHENTICATE(r4, &(0x7f0000000400)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000003c0)={&(0x7f00000002c0)={0xb8, r1, 0x4, 0x70bd2c, 0x7, {{}, {@void, @void}}, [@NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_LOCAL_STATE_CHANGE={0x4}, @NL80211_ATTR_IE={0x92, 0x2a, [@mic={0x8c, 0x18, {0xa25, "460500985c68", @long="25a4f144b238276d4f3456b7df1aabb5"}}, @erp={0x2a, 0x1, {0x0, 0x0, 0x1}}, @rann={0x7e, 0x15, {{0x1, 0x4}, 0x1f, 0xfe, @device_b, 0x42379d3e, 0x0, 0x10000}}, @preq={0x82, 0x51, @not_ext={{0x0, 0x1, 0x1}, 0x7f, 0x81, 0x4, @device_a, 0x4, "", 0xce, 0x5, 0x5, [{{0x0, 0x0, 0x1}, @device_a, 0x5}, {{}, @device_b, 0x401}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}, {{0x1}, @device_b, 0x2}, {{0x0, 0x0, 0x1}, @broadcast, 0x5}]}}, @challenge={0x10, 0x1, 0x1c}, @ibss={0x6, 0x2, 0x5}]}, @NL80211_ATTR_SSID={0x4}]}, 0xb8}, 0x1, 0x0, 0x0, 0x8c0}, 0x1) (async) r7 = syz_genetlink_get_family_id$nl80211(&(0x7f00000000c0), r0) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000100)={'wlan1\x00', 0x0}) r9 = socket$inet_sctp(0x2, 0xed0e0e6392dba1f, 0x84) ioctl$sock_inet_SIOCGIFDSTADDR(r9, 0x8917, &(0x7f0000000440)={'veth0_virt_wifi\x00', {0x2, 0x0, @initdev}}) sendmsg$NL80211_CMD_SET_MCAST_RATE(r3, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x40, r7, 0x800, 0x70bd2b, 0x25dfdbff, {{}, {@val={0x8, 0x3, r8}, @val={0xc, 0x99, {0x4, 0x57}}}}, [@NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}, @NL80211_ATTR_MCAST_RATE={0x8}, @NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xf0}]}, 0x40}, 0x1, 0x0, 0x0, 0x20014094}, 0x890) [ 2647.935690][T26804] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:22 executing program 0: r0 = socket$inet(0x2, 0x4000000000000001, 0x0) setsockopt$inet_tcp_int(r0, 0x6, 0x19, &(0x7f00000000c0)=0x7b, 0x4) (async) bind$inet(r0, &(0x7f0000000000)={0x2, 0x4e23, @broadcast}, 0x10) (async) bpf$MAP_UPDATE_ELEM(0x2, &(0x7f00000001c0)={0x1, &(0x7f0000000080)="4b8a3c14f2eae10a", &(0x7f0000000100)=@buf="042605de0465c7547c9c3bf2344ce68f0291148ded19280e3ea38a181750f28d0a36e9f50980b422653811db7df98a5b9444fbdb527d89e3b63f0c317143aa93d4e35cbd9cfe598b55620ec674da1d4ccf6ffc7bd0139a95475cbcb6a51e1c878b1b8cd748ae15c529b4b0f7325d14bf562e44d1e52dc63b3b483b0ee75bece7429b89672571061044978248893ebe808cb0f0b0f5efb7d68be3999d4b57a6003935356a7643bb86332b55a79b808a651167a4141ff1cf8d", 0x2}, 0x20) (async, rerun: 32) setsockopt$inet_tcp_int(r0, 0x6, 0xa, &(0x7f00000025c0)=0xe7d, 0x4) (rerun: 32) sendto$inet(r0, 0x0, 0x0, 0x200007fd, &(0x7f0000000040)={0x2, 0x4e23, @local}, 0x10) (async, rerun: 32) setsockopt$sock_int(r0, 0x1, 0x8, &(0x7f0000000600), 0x4) (async, rerun: 32) sendto$inet(r0, &(0x7f00000012c0)="0c268a927f1f6588b967481241ba78600a34f65ac618ded8974895abeaf4b4834ff922b3f1e0b02bd67aa03859bcecc7a95425a3a07e758044ab4ea6f7ae55d88fecf90b1a7511bf746bec66ba", 0xfe6a, 0x11, 0x0, 0x27) (async) recvmsg(r0, &(0x7f0000001500)={0x0, 0xa, &(0x7f0000002200)=[{&(0x7f00000035c0)=""/4106, 0x200045ca}], 0x2, 0x0, 0x46, 0x40024d}, 0x100) [ 2648.042274][T26804] bond1233: entered promiscuous mode [ 2648.054185][T26804] 8021q: adding VLAN 0 to HW filter on device bond1233 [ 2648.084831][T26803] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2648.165378][T26803] bond1116: entered promiscuous mode [ 2648.173550][T26803] 8021q: adding VLAN 0 to HW filter on device bond1116 19:47:22 executing program 0: r0 = socket$inet(0x2, 0x4000000000000001, 0x0) setsockopt$inet_tcp_int(r0, 0x6, 0x19, &(0x7f00000000c0)=0x7b, 0x4) (async) bind$inet(r0, &(0x7f0000000000)={0x2, 0x4e23, @broadcast}, 0x10) (async) bpf$MAP_UPDATE_ELEM(0x2, &(0x7f00000001c0)={0x1, &(0x7f0000000080)="4b8a3c14f2eae10a", &(0x7f0000000100)=@buf="042605de0465c7547c9c3bf2344ce68f0291148ded19280e3ea38a181750f28d0a36e9f50980b422653811db7df98a5b9444fbdb527d89e3b63f0c317143aa93d4e35cbd9cfe598b55620ec674da1d4ccf6ffc7bd0139a95475cbcb6a51e1c878b1b8cd748ae15c529b4b0f7325d14bf562e44d1e52dc63b3b483b0ee75bece7429b89672571061044978248893ebe808cb0f0b0f5efb7d68be3999d4b57a6003935356a7643bb86332b55a79b808a651167a4141ff1cf8d", 0x2}, 0x20) setsockopt$inet_tcp_int(r0, 0x6, 0xa, &(0x7f00000025c0)=0xe7d, 0x4) sendto$inet(r0, 0x0, 0x0, 0x200007fd, &(0x7f0000000040)={0x2, 0x4e23, @local}, 0x10) (async) setsockopt$sock_int(r0, 0x1, 0x8, &(0x7f0000000600), 0x4) (async) sendto$inet(r0, &(0x7f00000012c0)="0c268a927f1f6588b967481241ba78600a34f65ac618ded8974895abeaf4b4834ff922b3f1e0b02bd67aa03859bcecc7a95425a3a07e758044ab4ea6f7ae55d88fecf90b1a7511bf746bec66ba", 0xfe6a, 0x11, 0x0, 0x27) recvmsg(r0, &(0x7f0000001500)={0x0, 0xa, &(0x7f0000002200)=[{&(0x7f00000035c0)=""/4106, 0x200045ca}], 0x2, 0x0, 0x46, 0x40024d}, 0x100) 19:47:22 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRESDEC=0x0], 0x0) [ 2648.207394][T26810] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
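syz_emit_ethernet, used by the program 3 entries just above, is a syzkaller pseudo-syscall that injects a raw Ethernet frame into the fuzzer's virtual network. As a rough standalone analogue (an assumption for illustration, not the executor's actual mechanism), one can write a frame into a TAP device, in the spirit of the openat$tun/write$tun calls that appear later in this log; the device name and frame bytes below are placeholders, with the destination MAC copied from the 01:80:c2:00:00:00 frames seen here.

/* Rough analogue of syz_emit_ethernet (not the executor's real code):
 * inject one Ethernet frame by writing it into a TAP device.
 * "tap0" and the frame contents are placeholders. Needs CAP_NET_ADMIN. */
#include <fcntl.h>
#include <linux/if_tun.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/net/tun", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    struct ifreq ifr;
    memset(&ifr, 0, sizeof(ifr));
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;          /* raw frames, no extra header */
    strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);  /* placeholder device name */
    if (ioctl(fd, TUNSETIFF, &ifr) < 0) { perror("TUNSETIFF"); return 1; }

    /* Minimal bogus frame: dst/src MAC + EtherType, padded to 64 bytes. */
    unsigned char frame[64] = {
        0x01, 0x80, 0xc2, 0x00, 0x00, 0x00,   /* dst: link-local multicast MAC */
        0x8a, 0x06, 0x1b, 0x82, 0x7e, 0x90,   /* src */
        0x08, 0x00,                           /* EtherType: IPv4 */
    };
    if (write(fd, frame, sizeof(frame)) < 0)
        perror("write");
    close(fd);
    return 0;
}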
19:47:22 executing program 0: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) connect$inet6(r1, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000000), 0x4) setsockopt$inet6_tcp_TLS_TX(r1, 0x11a, 0x2, &(0x7f0000000180)=@ccm_128={{0x303}, "15c1c3b61233bb0b", "dd060a022aefe3121a45ed6d124267d5", "9638f6d8", "7592701356c7bfa6"}, 0x28) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000080)=0x100000001, 0x4) connect$inet6(r0, &(0x7f0000000200)={0xa, 0x0, 0x0, @loopback}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r0, 0x6, 0x1f, &(0x7f0000000540), 0x4) setsockopt$inet6_tcp_TLS_TX(r0, 0x11a, 0x300, 0x0, 0x0) setsockopt$inet6_tcp_TLS_TX(0xffffffffffffffff, 0x6, 0x1, &(0x7f00000000c0)=@ccm_128={{0x304}, "55f0db1bdbd14dd4", "51cc955b6636b2415393114f9fc92f97", "96b7452c", "e1923e36ba797131"}, 0x28) 19:47:22 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRESDEC=0x0], 0x0) [ 2648.374199][T26810] bond1194: entered promiscuous mode [ 2648.394326][T26810] 8021q: adding VLAN 0 to HW filter on device bond1194 [ 2648.534256][T26816] bond1116: (slave bridge1046): making interface the new active one [ 2648.559282][T26816] bridge1046: entered promiscuous mode 19:47:22 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRESDEC=0x0], 0x0) syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYRESDEC=0x0], 0x0) (async) [ 2648.597252][T26816] bond1116: (slave bridge1046): Enslaving as an active interface with an up link 19:47:22 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x74000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2648.746216][T26817] bond1233: (slave bridge1163): making interface the new active one [ 2648.756976][T26817] bridge1163: entered promiscuous mode [ 2648.775001][T26817] bond1233: (slave bridge1163): Enslaving as an active interface with an up link [ 2648.799154][T26807] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. 
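The program 0 entries in this stretch flip TCP sockets over to kernel TLS: setsockopt with TCP_ULP (0x1f) installs the "tls" upper layer protocol, and a second setsockopt on SOL_TLS (0x11a) loads a tls12 crypto_info (the log uses the CCM ciphers with versions 0x303 and 0x304, i.e. TLS 1.2 and 1.3). Below is a minimal sketch of that sequence against the linux/tls.h UAPI, shown with AES-GCM-128 and zeroed placeholder key material rather than the reproducer's exact values; it assumes fd is an already connected TCP socket.

/* Minimal kTLS sketch (not the reproducer): enable TLS_TX on a connected
 * TCP socket. Key/IV fields are left as zero placeholders. */
#include <linux/tls.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_ULP
#define TCP_ULP 31      /* 0x1f, as in the log */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282     /* 0x11a, as in the log */
#endif

static int enable_ktls_tx(int fd)
{
    /* Step 1: attach the "tls" upper layer protocol to the TCP socket. */
    if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")) < 0) {
        perror("TCP_ULP");
        return -1;
    }

    /* Step 2: hand the kernel the record-layer crypto state. */
    struct tls12_crypto_info_aes_gcm_128 ci;
    memset(&ci, 0, sizeof(ci));
    ci.info.version = TLS_1_2_VERSION;
    ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
    /* ci.key / ci.iv / ci.salt / ci.rec_seq stay zeroed placeholders here */

    if (setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci)) < 0) {
        perror("TLS_TX");
        return -1;
    }
    return 0;
}

Once TLS_TX succeeds, plain send() data on that socket is framed and encrypted as TLS records by the kernel.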
[ 2648.886527][T26818] bond1194: (slave bridge1097): making interface the new active one [ 2648.895750][T26818] bridge1097: entered promiscuous mode [ 2648.908946][T26818] bond1194: (slave bridge1097): Enslaving as an active interface with an up link 19:47:23 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) 19:47:23 executing program 3: syz_emit_ethernet(0x7d, 
&(0x7f0000000140)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500006f00000000002f907864010100640101020420880b001500007fff4dbdc96046cbb4d762dbd54e2535734c87bf4d94bf00000800000086dd080088be0000000100000000000000080022eb000000002000800002000000000000800000000000000000008000009f8bf527a979d671984e2a5b9d0aa7f46af5ca7c6e0395255ed17c1de35dc65959d9901c07f0345ef575268723f4f32a6ea7a7a57ac5b38c95b7c53ac0a029cbd141386d8ada1f7283eb93cd6d1e6daa3620e3a3e61087ca8073759edd7661f69d3be0efe7df4bf5599b6ebfc6e68d3d7b38eb33107910210b000000000000cdf6f0f9db000000"], 0x0) r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000000), 0x4000, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000040)={0xffffffffffffffff}) ioctl$F2FS_IOC_GARBAGE_COLLECT(r1, 0x4004f506, &(0x7f0000000080)) write$tun(r0, &(0x7f0000000240)=ANY=[@ANYBLOB="0003040005080600360042d40083b3aefed26535322ea6c50fba0c6f48b09e878cd5bda3ace50df91c066dc1c4c64a77f4cf8af2deba061723a93840e8cb1641cd9207733bbb34c06811cd5074c595decb3313e549bd0dc4b74a8814e923db2226f764c285d67e16d049738ffa51ae00cbf412ecca6522bbf77b076d088c35fc9d87d87b6d4fbbb79b6e516677706ec4d513f9621b74577abe6d7123640761f29157f6a4c658c8cd21c1b4ab2f49df126e0695d8ee99d070bff73813b22b8cfa6c394de4702a0e089e9aa5313dee47d7cac0328948b23fafb3e7bb3bd1916756b9b8e0a2aae1acf4798c8aee0954c171b8330e9e241a08e5d8c294f61ff55fdd8a0d37022d199f6a0d81bf1b12add7d975c9b461faeedd683db2a5d062a70300eedf06767f6e73f2f274123e309874050686ce45b2e1cddac7ac0fd167e25660e8fc196b288e20d6645150fca0429cb269167521fa7f97fcc2495dc7194fba32553e70db2f3c93dbb8e5ea7a7294a7fb0af197ce52e4b934ff059a6062df01b52b037d8bbe61d9da1a4e2ebdcdc07088acf327d17298f3de4395db365043fa92aaa2b090794d62febee90349c6baa6370ce3539774f93b66cac59a24df3865eccedb86f33a71d08a3cef5f521575296e051016c5e3197ad080654c8900043a4f0f4dd3174c53f0b738925cde8c68b7b75a0b9d3c0f7dcb0750eb0131be00b01dfb359ff1d9329bf4110e3c0e10be92081f46525059cd52d16757114591707fbfa0fa02e3823b5e3cb2ad37492b469e48912293ea335eca8b82f750cfe6d831c6bd966508f1d969f26cd55cff90f955d1fc9797ab75c5695be7ebdad40ae271dc5bf1724c1562144c85060de44accfeb905bb73a20ad501a296c554322b101b33d0c79544b35fddd63b6dc65f26ac57f685b6f55c46c478fd83538f1789e3023d1ab467638da6585cf010e550fce622c5c5bd88fa4e87121a4947feec6ca1dccc2f70a6d86ef30f0bb03e5495758e3930e1e1c851585d2cc705445d68df4b72c6f6dd8b84149ed1163ed2cbc3f96ec80f6208b0ee8ba4ceae24aab5c0e6ed5a480e51b06495d74aecc90496307a9d131f24900d8d8baaa35bec4921ef3a9db9fe416c2bc27b68d5f16c0d884e33d947497fd2844a95c2eb12246ee908db939c580072c43dcbdf5244170ce65923a74ae9f9cc348923fabf41b500cb7bf5efd6dfde3499c4c5143077dca606ec81e2bce6a510526c3bf3cb52fe2bd6708c2b50ec990c31f3117dc7f417a80485cba01622ba9f0d48268139a5824a3e2e427d0d40f29f221eeb679bdd4fd44f2f5a26956c5490d561e225a31ba54ed7fea0b71978033599b736e263caff33c17081139de7bec73ff8bd27270f114b9eb775774465335f6c86b23237471633012a294fd41fba1daffd1c077d14ab1a081c07f30073b213992a3ceda28d9828f37b7a8d25672c62e9255dd03e15b264297669df8ae1fbab8448ff5b34df4a4c9c3664640bbe0e57310edcf6385656b89689526e28579752ead19e857924a4814c7571924af7639daed3bb96e7a22f193da45ad4c5461e98bd0ebcbfefd04ff20fdb3a87f8f6263084f39117e598cfedeb975c01a15ce1a8f3d5a8992f7eff367402b62b73e20e2df66552db084aeb6778ea5efd66d3b81fa8fe218f73a57adf4acf28974e0ad7894a97e455c282c829a95912cb03f15dc6dcebc4f7712821b23322cb9e701e3377d8a38c3885f9905466ed819c476dae530bf3589bfd637cd8aac5640700000029bcb34b2b30ba9aa54b8b0d2b8defd321803ca475636e24916a040229b9c6303b0de8b14e08ebd496155f1d575954d8d23b57186ddcef5307f99f04a30f6bc2c3fcdda52b223923a7b47b82d72a019b9adc45adad0c7728217c90f71b7e8b604741
75756c349dce59413363cbdb5949140244ab0583b1a7c64ab8426df3ead72c2648b85f1adb1a71e14979f385396252f3ff5b660851a5583b1f71a44a212f65f3889793ac54ece93e509553837354d26ac144a4202056d1c19c92a8e6609feda9cbf9e0d128a5465b3fbfff772d3e714d0fab15cc9e29ce162b3cceb2a902b03318de90497054f823bf4e1c8a0a2a656be921542c9ca294d9318d97bf010cbb9b3274914080adbb06ec42720d5f683e48a71be608808fe9d5a5b2a398e17fa9826361479009bcfc8f0abb1ba1bdfccaf243ca1519071f96069673ba3e181b17cbf87f407cee3af2be6f16480843cb2f49af0fdc72386d25d2ddcb56f6e41f4a98f84d2bc1b891303d8484f88e319ef9727d6318f9ccc5c2d5bcc0883208bb32395f15e91b3515d13cb397cd4fee955b7525a3ec3e6528b7a846ad84b194919361cff694bc5e0a3ce18e8e99fbe2731936910e11b850d5395eafe3df4ae71103a626582c474f3994761c7057ef05f85dfb658ae8b1211ab42195c7b7f59e5dc364df2791ad9c8c1466732cdd2b9451f686dcb20e0e49a6c8fad9eac1ff993720cabdc5299e18041b7f382ef46b7821b4babdc2e837bb5ff78a2e84917ce5e8b3d52dfcdd69d1aa3843e771b594f5a85b95465ad370b2bcef1f5ba92c3ad534d56e7f7c4f0eaa5b85838733ba28ed6b270a23947ca3f9c6e5b8233c8312246c87e394da40970ddeb8b1df095845c905a0863c6db199e649fc3540ae3ebfbf6cace124cc6b04ce63da21f3141add0bdbf68a51cdf2d86f3c0f704156a73912b292381a51c626620ddc322a1dd8864d2e6ed90e7c020be58087cdd9a90d255b2ab58facfd42af86a9166dc17bbf3994fb1448512cdbddef15834db98a6442c6321fa5b3c5a4c4cb92fcfe9b884df157f051c3c04530e353f4c734d71777ff5af9457e1e705e62fb32e5198f2c6d35546ca2e20f62e549f5e9c6b711e015540a65cb7fb49e9411dd01ba37492efcbeba5636c38142c97c92a0c0637fb5245e97a6dc7374d6a443c4aadfaf6d773436643aa0be8fd3a9ecb13f35b7111ff2bc34a9075a7fb4d4fc50b5cdd2fb9549e37b62b52efe7be3c026f7b35d6382a13ac5738804e884feba55e2d907f05b4ffbbc895fefac39e67223646a7195ae5332f922a6ef09924ae01362fb7e868a5fea0442fb4148f6c3ff9388dfbafe8113bda697c76626393ebb9d15be0b52d2383ec4559c32f2c4f70f74497fd32d4aaec7f4c635745e3b4710f3c05237e486d953d821a74449a0040aec833a42ac203ae183eb5120b954028fee8fd656c2711259727430d68d2f28ae37349095fc6f7755475812c83a3caa35838960e32769d01b4d6861310a9c4fb52c92234ffe01239a1c7ebb15eb13f06508a706338f5de1b8b08d2db82582e5edea8da5b89a2e76cd6d85055e84b871c1f1cdbe6de6058a6adee964c41c67ec905c483ffc578c112ecc2f37e22bdce5b46a5f3a9bcf181797e32108ce5fbbf5b076450134b01c95252f91f8de6cfb4890cea8e8f2a450d6409d212dc983863543a6f81a1fbb0a9c83578b25901c888ed1dd4c39bec456c7ddc3471e03d4d7d22404994afeb99092f7ab7036c1ff5dc9c5734f6b72e74827b9a2eb5187415014b64d0331938d8bbfec056a5ac29510655f85a68710f1c45f9dbc5fe710755caf00d0976d842703ddd4cfdeaf574ea25210bba80b505ba064f41519994b06d6196d8bab964d6c0e15f6af611b482890bed84cf27b8a6d417b5b1cf64359d0dbde4a075d3bb1f50c4b2984e5e01fcfa634e50863ca39106715c0690266abb884d04efb5995d0a9691862b078c992167f892a534bcc8502f2b924f45977542bc2ba50f377af3dc0947da503ffb95b56f56eabe453fa60d05087b115e8facf0460f911970c0be9a57bac7e588386f874ca15b404ee18e50cba240cff48b4c891a99c4165918c504b4b0688d3922807dda03b59098b4f810a60e3fae5d0ec2addc100203b7a21dc48db5a198b3027194982b81dec75c32e01aca2b9590f5aeef683b3e772424d1fd857ea99db77fcfdce81ae82380e5bc2215af5a2ccdf53f8d722cf9dbf839dd286cffea03c260d5a9d3ebba047de83b7f5b057b6fab94d9e836a3dde122d7566dc8efad05eabe5d32f269e505d60cfb0e2cb997b51321d15b065a3a45c5dab224cd9a2516a9f86f00b9bab391e17a7fd32db73b5b266a45bb5657e8a4ded674a4018ddcc1459621f5b5f521832ab273c6b78baff8af0e947000e8f9d22b2c180652ed44022d6d9c78e7146fe9b01ebf44e6a97136ae9dfce387021a2c533ba471deee501f1b3b314032c79b559fc75ad591aaac17d84086e09125158ef2ebbe079e8ccdf42769f9512cf6550f67b184209125fe14fcfd72033683dc2ba70cd4f5392c82a472aeca628b5470aae5006943ddb2cc26c007c9648cc3a3e1430baccf9e25a536b9152a363490a18c5782589230d104eb4f374108b3
1cb465f33e4ee11b746677c61d01754e9cac2859a3e662a3b2bb72f46bc5a30df266fe7cae5f7b4bfa0695a008a6cb703755f4d67ae06c3b1ceb86b04ea8dc6250837e5b465130b47d12216cdaecdec743eca7cc878d58bbc73f9c258b332fb3aaa59756d5d1c48b6a13fe56b0fb9ee35f49701ececd1e87aef3e428882914b5ec0425830d2955d66cc488c347bf54ec29a75ed92b9bd39d612da5a6907dfd6be7d6c2c083a3542d7c0adc037df5e6477bd36abea8e8b310199e2c73c46a3f50be1fd5d0265bdbdb690b764d5c84beb491d42b1d256d90a789ec773fcf27e50c4f7ad9c3aa152df4364ff5495075a37832644ca63b1c42cdcdc402457bb2f6463694592e6546e647e279c7fdc0cbd92434eb8bc2361e59ab470a077d650c79a07d03c05d3b917be15d5072e2c448adcdf615f7d438d0652aa616c48adf8618e87a035e2ea3934d6c2510adf785e87cb82c64f16696c5bc463fdccdb646c32372a5351f8f22f5463868cd2a5a72be3599a8a07ba451e3fe8e132f3710281c01e92d950f4ac0f38aa8acb59ddc8788e2df798de59ff2e0470d11b7fae3190315bcc4ce86355dcfad517a22734bc89633e29d6af185445a5aefdd64b9eaa7fe06df315e0e86c6af4b90641bcb9a103d4153b0a7cff48f6b32755cafba143a400bb0debe3766b1973c7f079907a2710f5aac97a51aa11e7fb6d8a05e1835d03106c88db157285442badf9bc1eb6c3eccdffcab360c0aa295e80c45f766b57fd4026cf0608298a73b57763dfb8ca647c787de2127e8d0b67f7f2ead74107abc38c57161e768f380f87e8adfdccfe9b515caa989f66ad07745d42e71006fe5389e9b897c92682fa587e120bca7a2dac7bc01b01ecd8ec375859908f3dbc19617d6072140972d29f3dd2fe894b8cf4fb6e38c394f126b8a3b3d2e97842365ec8563fb3352f7e93bf13394c42f3324d5dfe7da59997c9b0db2b68b64911657fe8ee849de8824c1d75708d5c503cbcb6ae646d15618b16cdd43e526ea84584443b334f33d57d62add1de5419f49d562f1c38f2c25682d9752f28534adacad3fa8206e7b42735a659e5462ad22d38c61963bc67882abf87ff0110762c47dc835bfc67e1d9e0c711632668e614107bc34c33a3e8894311b7da7f637d2d162a52b4bde2cc3d7490afb82e5e473b57e92cd60e05fa107d5be8da41fd3c64c4ed2223b9cab5bf14de324d0275d9dc698390e20b111b2d07508649e4e0f23283ab42507d7316c8eb3ef2aca54e166f65966ce95decde3b25de87b65fa5907d60f6933aed5cfdc064c9d311f2d789943b8321afbc5021c1798545e7ff1ad79c490ecf97b98f36a7f8eb8fbb00000000009737aef385d9353b3940e3f7b5784e9cb74b473e05387aa12880ac67b4cb5e1f1f07cbfbf90a0751d04642a79c427293cf7e"], 0x100d) 19:47:23 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x68e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:23 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 
0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x63150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2648.947816][T26820] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2648.960126][T26821] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2648.972825][T26846] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2649.118462][T26846] bond1117: entered promiscuous mode [ 2649.127230][T26846] 8021q: adding VLAN 0 to HW filter on device bond1117 [ 2649.236205][T26847] bond1117: (slave bridge1047): making interface the new active one [ 2649.245293][T26847] bridge1047: entered promiscuous mode [ 2649.264733][T26847] bond1117: (slave bridge1047): Enslaving as an active interface with an up link [ 2649.287620][T26849] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:23 executing program 0: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) connect$inet6(r1, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) (async) connect$inet6(r1, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000000), 0x4) (async) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000000), 0x4) setsockopt$inet6_tcp_TLS_TX(r1, 0x11a, 0x2, &(0x7f0000000180)=@ccm_128={{0x303}, "15c1c3b61233bb0b", "dd060a022aefe3121a45ed6d124267d5", "9638f6d8", "7592701356c7bfa6"}, 0x28) (async) setsockopt$inet6_tcp_TLS_TX(r1, 0x11a, 0x2, &(0x7f0000000180)=@ccm_128={{0x303}, "15c1c3b61233bb0b", "dd060a022aefe3121a45ed6d124267d5", "9638f6d8", "7592701356c7bfa6"}, 0x28) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000080)=0x100000001, 0x4) connect$inet6(r0, &(0x7f0000000200)={0xa, 0x0, 0x0, @loopback}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r0, 0x6, 0x1f, &(0x7f0000000540), 0x4) setsockopt$inet6_tcp_TLS_TX(r0, 0x11a, 0x300, 0x0, 0x0) setsockopt$inet6_tcp_TLS_TX(0xffffffffffffffff, 0x6, 0x1, &(0x7f00000000c0)=@ccm_128={{0x304}, "55f0db1bdbd14dd4", "51cc955b6636b2415393114f9fc92f97", "96b7452c", "e1923e36ba797131"}, 0x28) 19:47:23 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:23 executing program 3: syz_emit_ethernet(0x7d, 
&(0x7f0000000140)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500006f00000000002f907864010100640101020420880b001500007fff4dbdc96046cbb4d762dbd54e2535734c87bf4d94bf00000800000086dd080088be0000000100000000000000080022eb000000002000800002000000000000800000000000000000008000009f8bf527a979d671984e2a5b9d0aa7f46af5ca7c6e0395255ed17c1de35dc65959d9901c07f0345ef575268723f4f32a6ea7a7a57ac5b38c95b7c53ac0a029cbd141386d8ada1f7283eb93cd6d1e6daa3620e3a3e61087ca8073759edd7661f69d3be0efe7df4bf5599b6ebfc6e68d3d7b38eb33107910210b000000000000cdf6f0f9db000000"], 0x0) r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000000), 0x4000, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000040)={0xffffffffffffffff}) ioctl$F2FS_IOC_GARBAGE_COLLECT(r1, 0x4004f506, &(0x7f0000000080)) write$tun(r0, &(0x7f0000000240)=ANY=[@ANYBLOB="0003040005080600360042d40083b3aefed26535322ea6c50fba0c6f48b09e878cd5bda3ace50df91c066dc1c4c64a77f4cf8af2deba061723a93840e8cb1641cd9207733bbb34c06811cd5074c595decb3313e549bd0dc4b74a8814e923db2226f764c285d67e16d049738ffa51ae00cbf412ecca6522bbf77b076d088c35fc9d87d87b6d4fbbb79b6e516677706ec4d513f9621b74577abe6d7123640761f29157f6a4c658c8cd21c1b4ab2f49df126e0695d8ee99d070bff73813b22b8cfa6c394de4702a0e089e9aa5313dee47d7cac0328948b23fafb3e7bb3bd1916756b9b8e0a2aae1acf4798c8aee0954c171b8330e9e241a08e5d8c294f61ff55fdd8a0d37022d199f6a0d81bf1b12add7d975c9b461faeedd683db2a5d062a70300eedf06767f6e73f2f274123e309874050686ce45b2e1cddac7ac0fd167e25660e8fc196b288e20d6645150fca0429cb269167521fa7f97fcc2495dc7194fba32553e70db2f3c93dbb8e5ea7a7294a7fb0af197ce52e4b934ff059a6062df01b52b037d8bbe61d9da1a4e2ebdcdc07088acf327d17298f3de4395db365043fa92aaa2b090794d62febee90349c6baa6370ce3539774f93b66cac59a24df3865eccedb86f33a71d08a3cef5f521575296e051016c5e3197ad080654c8900043a4f0f4dd3174c53f0b738925cde8c68b7b75a0b9d3c0f7dcb0750eb0131be00b01dfb359ff1d9329bf4110e3c0e10be92081f46525059cd52d16757114591707fbfa0fa02e3823b5e3cb2ad37492b469e48912293ea335eca8b82f750cfe6d831c6bd966508f1d969f26cd55cff90f955d1fc9797ab75c5695be7ebdad40ae271dc5bf1724c1562144c85060de44accfeb905bb73a20ad501a296c554322b101b33d0c79544b35fddd63b6dc65f26ac57f685b6f55c46c478fd83538f1789e3023d1ab467638da6585cf010e550fce622c5c5bd88fa4e87121a4947feec6ca1dccc2f70a6d86ef30f0bb03e5495758e3930e1e1c851585d2cc705445d68df4b72c6f6dd8b84149ed1163ed2cbc3f96ec80f6208b0ee8ba4ceae24aab5c0e6ed5a480e51b06495d74aecc90496307a9d131f24900d8d8baaa35bec4921ef3a9db9fe416c2bc27b68d5f16c0d884e33d947497fd2844a95c2eb12246ee908db939c580072c43dcbdf5244170ce65923a74ae9f9cc348923fabf41b500cb7bf5efd6dfde3499c4c5143077dca606ec81e2bce6a510526c3bf3cb52fe2bd6708c2b50ec990c31f3117dc7f417a80485cba01622ba9f0d48268139a5824a3e2e427d0d40f29f221eeb679bdd4fd44f2f5a26956c5490d561e225a31ba54ed7fea0b71978033599b736e263caff33c17081139de7bec73ff8bd27270f114b9eb775774465335f6c86b23237471633012a294fd41fba1daffd1c077d14ab1a081c07f30073b213992a3ceda28d9828f37b7a8d25672c62e9255dd03e15b264297669df8ae1fbab8448ff5b34df4a4c9c3664640bbe0e57310edcf6385656b89689526e28579752ead19e857924a4814c7571924af7639daed3bb96e7a22f193da45ad4c5461e98bd0ebcbfefd04ff20fdb3a87f8f6263084f39117e598cfedeb975c01a15ce1a8f3d5a8992f7eff367402b62b73e20e2df66552db084aeb6778ea5efd66d3b81fa8fe218f73a57adf4acf28974e0ad7894a97e455c282c829a95912cb03f15dc6dcebc4f7712821b23322cb9e701e3377d8a38c3885f9905466ed819c476dae530bf3589bfd637cd8aac5640700000029bcb34b2b30ba9aa54b8b0d2b8defd321803ca475636e24916a040229b9c6303b0de8b14e08ebd496155f1d575954d8d23b57186ddcef5307f99f04a30f6bc2c3fcdda52b223923a7b47b82d72a019b9adc45adad0c7728217c90f71b7e8b604741
75756c349dce59413363cbdb5949140244ab0583b1a7c64ab8426df3ead72c2648b85f1adb1a71e14979f385396252f3ff5b660851a5583b1f71a44a212f65f3889793ac54ece93e509553837354d26ac144a4202056d1c19c92a8e6609feda9cbf9e0d128a5465b3fbfff772d3e714d0fab15cc9e29ce162b3cceb2a902b03318de90497054f823bf4e1c8a0a2a656be921542c9ca294d9318d97bf010cbb9b3274914080adbb06ec42720d5f683e48a71be608808fe9d5a5b2a398e17fa9826361479009bcfc8f0abb1ba1bdfccaf243ca1519071f96069673ba3e181b17cbf87f407cee3af2be6f16480843cb2f49af0fdc72386d25d2ddcb56f6e41f4a98f84d2bc1b891303d8484f88e319ef9727d6318f9ccc5c2d5bcc0883208bb32395f15e91b3515d13cb397cd4fee955b7525a3ec3e6528b7a846ad84b194919361cff694bc5e0a3ce18e8e99fbe2731936910e11b850d5395eafe3df4ae71103a626582c474f3994761c7057ef05f85dfb658ae8b1211ab42195c7b7f59e5dc364df2791ad9c8c1466732cdd2b9451f686dcb20e0e49a6c8fad9eac1ff993720cabdc5299e18041b7f382ef46b7821b4babdc2e837bb5ff78a2e84917ce5e8b3d52dfcdd69d1aa3843e771b594f5a85b95465ad370b2bcef1f5ba92c3ad534d56e7f7c4f0eaa5b85838733ba28ed6b270a23947ca3f9c6e5b8233c8312246c87e394da40970ddeb8b1df095845c905a0863c6db199e649fc3540ae3ebfbf6cace124cc6b04ce63da21f3141add0bdbf68a51cdf2d86f3c0f704156a73912b292381a51c626620ddc322a1dd8864d2e6ed90e7c020be58087cdd9a90d255b2ab58facfd42af86a9166dc17bbf3994fb1448512cdbddef15834db98a6442c6321fa5b3c5a4c4cb92fcfe9b884df157f051c3c04530e353f4c734d71777ff5af9457e1e705e62fb32e5198f2c6d35546ca2e20f62e549f5e9c6b711e015540a65cb7fb49e9411dd01ba37492efcbeba5636c38142c97c92a0c0637fb5245e97a6dc7374d6a443c4aadfaf6d773436643aa0be8fd3a9ecb13f35b7111ff2bc34a9075a7fb4d4fc50b5cdd2fb9549e37b62b52efe7be3c026f7b35d6382a13ac5738804e884feba55e2d907f05b4ffbbc895fefac39e67223646a7195ae5332f922a6ef09924ae01362fb7e868a5fea0442fb4148f6c3ff9388dfbafe8113bda697c76626393ebb9d15be0b52d2383ec4559c32f2c4f70f74497fd32d4aaec7f4c635745e3b4710f3c05237e486d953d821a74449a0040aec833a42ac203ae183eb5120b954028fee8fd656c2711259727430d68d2f28ae37349095fc6f7755475812c83a3caa35838960e32769d01b4d6861310a9c4fb52c92234ffe01239a1c7ebb15eb13f06508a706338f5de1b8b08d2db82582e5edea8da5b89a2e76cd6d85055e84b871c1f1cdbe6de6058a6adee964c41c67ec905c483ffc578c112ecc2f37e22bdce5b46a5f3a9bcf181797e32108ce5fbbf5b076450134b01c95252f91f8de6cfb4890cea8e8f2a450d6409d212dc983863543a6f81a1fbb0a9c83578b25901c888ed1dd4c39bec456c7ddc3471e03d4d7d22404994afeb99092f7ab7036c1ff5dc9c5734f6b72e74827b9a2eb5187415014b64d0331938d8bbfec056a5ac29510655f85a68710f1c45f9dbc5fe710755caf00d0976d842703ddd4cfdeaf574ea25210bba80b505ba064f41519994b06d6196d8bab964d6c0e15f6af611b482890bed84cf27b8a6d417b5b1cf64359d0dbde4a075d3bb1f50c4b2984e5e01fcfa634e50863ca39106715c0690266abb884d04efb5995d0a9691862b078c992167f892a534bcc8502f2b924f45977542bc2ba50f377af3dc0947da503ffb95b56f56eabe453fa60d05087b115e8facf0460f911970c0be9a57bac7e588386f874ca15b404ee18e50cba240cff48b4c891a99c4165918c504b4b0688d3922807dda03b59098b4f810a60e3fae5d0ec2addc100203b7a21dc48db5a198b3027194982b81dec75c32e01aca2b9590f5aeef683b3e772424d1fd857ea99db77fcfdce81ae82380e5bc2215af5a2ccdf53f8d722cf9dbf839dd286cffea03c260d5a9d3ebba047de83b7f5b057b6fab94d9e836a3dde122d7566dc8efad05eabe5d32f269e505d60cfb0e2cb997b51321d15b065a3a45c5dab224cd9a2516a9f86f00b9bab391e17a7fd32db73b5b266a45bb5657e8a4ded674a4018ddcc1459621f5b5f521832ab273c6b78baff8af0e947000e8f9d22b2c180652ed44022d6d9c78e7146fe9b01ebf44e6a97136ae9dfce387021a2c533ba471deee501f1b3b314032c79b559fc75ad591aaac17d84086e09125158ef2ebbe079e8ccdf42769f9512cf6550f67b184209125fe14fcfd72033683dc2ba70cd4f5392c82a472aeca628b5470aae5006943ddb2cc26c007c9648cc3a3e1430baccf9e25a536b9152a363490a18c5782589230d104eb4f374108b3
1cb465f33e4ee11b746677c61d01754e9cac2859a3e662a3b2bb72f46bc5a30df266fe7cae5f7b4bfa0695a008a6cb703755f4d67ae06c3b1ceb86b04ea8dc6250837e5b465130b47d12216cdaecdec743eca7cc878d58bbc73f9c258b332fb3aaa59756d5d1c48b6a13fe56b0fb9ee35f49701ececd1e87aef3e428882914b5ec0425830d2955d66cc488c347bf54ec29a75ed92b9bd39d612da5a6907dfd6be7d6c2c083a3542d7c0adc037df5e6477bd36abea8e8b310199e2c73c46a3f50be1fd5d0265bdbdb690b764d5c84beb491d42b1d256d90a789ec773fcf27e50c4f7ad9c3aa152df4364ff5495075a37832644ca63b1c42cdcdc402457bb2f6463694592e6546e647e279c7fdc0cbd92434eb8bc2361e59ab470a077d650c79a07d03c05d3b917be15d5072e2c448adcdf615f7d438d0652aa616c48adf8618e87a035e2ea3934d6c2510adf785e87cb82c64f16696c5bc463fdccdb646c32372a5351f8f22f5463868cd2a5a72be3599a8a07ba451e3fe8e132f3710281c01e92d950f4ac0f38aa8acb59ddc8788e2df798de59ff2e0470d11b7fae3190315bcc4ce86355dcfad517a22734bc89633e29d6af185445a5aefdd64b9eaa7fe06df315e0e86c6af4b90641bcb9a103d4153b0a7cff48f6b32755cafba143a400bb0debe3766b1973c7f079907a2710f5aac97a51aa11e7fb6d8a05e1835d03106c88db157285442badf9bc1eb6c3eccdffcab360c0aa295e80c45f766b57fd4026cf0608298a73b57763dfb8ca647c787de2127e8d0b67f7f2ead74107abc38c57161e768f380f87e8adfdccfe9b515caa989f66ad07745d42e71006fe5389e9b897c92682fa587e120bca7a2dac7bc01b01ecd8ec375859908f3dbc19617d6072140972d29f3dd2fe894b8cf4fb6e38c394f126b8a3b3d2e97842365ec8563fb3352f7e93bf13394c42f3324d5dfe7da59997c9b0db2b68b64911657fe8ee849de8824c1d75708d5c503cbcb6ae646d15618b16cdd43e526ea84584443b334f33d57d62add1de5419f49d562f1c38f2c25682d9752f28534adacad3fa8206e7b42735a659e5462ad22d38c61963bc67882abf87ff0110762c47dc835bfc67e1d9e0c711632668e614107bc34c33a3e8894311b7da7f637d2d162a52b4bde2cc3d7490afb82e5e473b57e92cd60e05fa107d5be8da41fd3c64c4ed2223b9cab5bf14de324d0275d9dc698390e20b111b2d07508649e4e0f23283ab42507d7316c8eb3ef2aca54e166f65966ce95decde3b25de87b65fa5907d60f6933aed5cfdc064c9d311f2d789943b8321afbc5021c1798545e7ff1ad79c490ecf97b98f36a7f8eb8fbb00000000009737aef385d9353b3940e3f7b5784e9cb74b473e05387aa12880ac67b4cb5e1f1f07cbfbf90a0751d04642a79c427293cf7e"], 0x100d) (async) write$tun(r0, 
&(0x7f0000000240)=ANY=[@ANYBLOB="0003040005080600360042d40083b3aefed26535322ea6c50fba0c6f48b09e878cd5bda3ace50df91c066dc1c4c64a77f4cf8af2deba061723a93840e8cb1641cd9207733bbb34c06811cd5074c595decb3313e549bd0dc4b74a8814e923db2226f764c285d67e16d049738ffa51ae00cbf412ecca6522bbf77b076d088c35fc9d87d87b6d4fbbb79b6e516677706ec4d513f9621b74577abe6d7123640761f29157f6a4c658c8cd21c1b4ab2f49df126e0695d8ee99d070bff73813b22b8cfa6c394de4702a0e089e9aa5313dee47d7cac0328948b23fafb3e7bb3bd1916756b9b8e0a2aae1acf4798c8aee0954c171b8330e9e241a08e5d8c294f61ff55fdd8a0d37022d199f6a0d81bf1b12add7d975c9b461faeedd683db2a5d062a70300eedf06767f6e73f2f274123e309874050686ce45b2e1cddac7ac0fd167e25660e8fc196b288e20d6645150fca0429cb269167521fa7f97fcc2495dc7194fba32553e70db2f3c93dbb8e5ea7a7294a7fb0af197ce52e4b934ff059a6062df01b52b037d8bbe61d9da1a4e2ebdcdc07088acf327d17298f3de4395db365043fa92aaa2b090794d62febee90349c6baa6370ce3539774f93b66cac59a24df3865eccedb86f33a71d08a3cef5f521575296e051016c5e3197ad080654c8900043a4f0f4dd3174c53f0b738925cde8c68b7b75a0b9d3c0f7dcb0750eb0131be00b01dfb359ff1d9329bf4110e3c0e10be92081f46525059cd52d16757114591707fbfa0fa02e3823b5e3cb2ad37492b469e48912293ea335eca8b82f750cfe6d831c6bd966508f1d969f26cd55cff90f955d1fc9797ab75c5695be7ebdad40ae271dc5bf1724c1562144c85060de44accfeb905bb73a20ad501a296c554322b101b33d0c79544b35fddd63b6dc65f26ac57f685b6f55c46c478fd83538f1789e3023d1ab467638da6585cf010e550fce622c5c5bd88fa4e87121a4947feec6ca1dccc2f70a6d86ef30f0bb03e5495758e3930e1e1c851585d2cc705445d68df4b72c6f6dd8b84149ed1163ed2cbc3f96ec80f6208b0ee8ba4ceae24aab5c0e6ed5a480e51b06495d74aecc90496307a9d131f24900d8d8baaa35bec4921ef3a9db9fe416c2bc27b68d5f16c0d884e33d947497fd2844a95c2eb12246ee908db939c580072c43dcbdf5244170ce65923a74ae9f9cc348923fabf41b500cb7bf5efd6dfde3499c4c5143077dca606ec81e2bce6a510526c3bf3cb52fe2bd6708c2b50ec990c31f3117dc7f417a80485cba01622ba9f0d48268139a5824a3e2e427d0d40f29f221eeb679bdd4fd44f2f5a26956c5490d561e225a31ba54ed7fea0b71978033599b736e263caff33c17081139de7bec73ff8bd27270f114b9eb775774465335f6c86b23237471633012a294fd41fba1daffd1c077d14ab1a081c07f30073b213992a3ceda28d9828f37b7a8d25672c62e9255dd03e15b264297669df8ae1fbab8448ff5b34df4a4c9c3664640bbe0e57310edcf6385656b89689526e28579752ead19e857924a4814c7571924af7639daed3bb96e7a22f193da45ad4c5461e98bd0ebcbfefd04ff20fdb3a87f8f6263084f39117e598cfedeb975c01a15ce1a8f3d5a8992f7eff367402b62b73e20e2df66552db084aeb6778ea5efd66d3b81fa8fe218f73a57adf4acf28974e0ad7894a97e455c282c829a95912cb03f15dc6dcebc4f7712821b23322cb9e701e3377d8a38c3885f9905466ed819c476dae530bf3589bfd637cd8aac5640700000029bcb34b2b30ba9aa54b8b0d2b8defd321803ca475636e24916a040229b9c6303b0de8b14e08ebd496155f1d575954d8d23b57186ddcef5307f99f04a30f6bc2c3fcdda52b223923a7b47b82d72a019b9adc45adad0c7728217c90f71b7e8b60474175756c349dce59413363cbdb5949140244ab0583b1a7c64ab8426df3ead72c2648b85f1adb1a71e14979f385396252f3ff5b660851a5583b1f71a44a212f65f3889793ac54ece93e509553837354d26ac144a4202056d1c19c92a8e6609feda9cbf9e0d128a5465b3fbfff772d3e714d0fab15cc9e29ce162b3cceb2a902b03318de90497054f823bf4e1c8a0a2a656be921542c9ca294d9318d97bf010cbb9b3274914080adbb06ec42720d5f683e48a71be608808fe9d5a5b2a398e17fa9826361479009bcfc8f0abb1ba1bdfccaf243ca1519071f96069673ba3e181b17cbf87f407cee3af2be6f16480843cb2f49af0fdc72386d25d2ddcb56f6e41f4a98f84d2bc1b891303d8484f88e319ef9727d6318f9ccc5c2d5bcc0883208bb32395f15e91b3515d13cb397cd4fee955b7525a3ec3e6528b7a846ad84b194919361cff694bc5e0a3ce18e8e99fbe2731936910e11b850d5395eafe3df4ae71103a626582c474f3994761c7057ef05f85dfb658ae8b1211ab42195c7b7f59e5dc364df2791ad9c8c1466732cdd2b9451
f686dcb20e0e49a6c8fad9eac1ff993720cabdc5299e18041b7f382ef46b7821b4babdc2e837bb5ff78a2e84917ce5e8b3d52dfcdd69d1aa3843e771b594f5a85b95465ad370b2bcef1f5ba92c3ad534d56e7f7c4f0eaa5b85838733ba28ed6b270a23947ca3f9c6e5b8233c8312246c87e394da40970ddeb8b1df095845c905a0863c6db199e649fc3540ae3ebfbf6cace124cc6b04ce63da21f3141add0bdbf68a51cdf2d86f3c0f704156a73912b292381a51c626620ddc322a1dd8864d2e6ed90e7c020be58087cdd9a90d255b2ab58facfd42af86a9166dc17bbf3994fb1448512cdbddef15834db98a6442c6321fa5b3c5a4c4cb92fcfe9b884df157f051c3c04530e353f4c734d71777ff5af9457e1e705e62fb32e5198f2c6d35546ca2e20f62e549f5e9c6b711e015540a65cb7fb49e9411dd01ba37492efcbeba5636c38142c97c92a0c0637fb5245e97a6dc7374d6a443c4aadfaf6d773436643aa0be8fd3a9ecb13f35b7111ff2bc34a9075a7fb4d4fc50b5cdd2fb9549e37b62b52efe7be3c026f7b35d6382a13ac5738804e884feba55e2d907f05b4ffbbc895fefac39e67223646a7195ae5332f922a6ef09924ae01362fb7e868a5fea0442fb4148f6c3ff9388dfbafe8113bda697c76626393ebb9d15be0b52d2383ec4559c32f2c4f70f74497fd32d4aaec7f4c635745e3b4710f3c05237e486d953d821a74449a0040aec833a42ac203ae183eb5120b954028fee8fd656c2711259727430d68d2f28ae37349095fc6f7755475812c83a3caa35838960e32769d01b4d6861310a9c4fb52c92234ffe01239a1c7ebb15eb13f06508a706338f5de1b8b08d2db82582e5edea8da5b89a2e76cd6d85055e84b871c1f1cdbe6de6058a6adee964c41c67ec905c483ffc578c112ecc2f37e22bdce5b46a5f3a9bcf181797e32108ce5fbbf5b076450134b01c95252f91f8de6cfb4890cea8e8f2a450d6409d212dc983863543a6f81a1fbb0a9c83578b25901c888ed1dd4c39bec456c7ddc3471e03d4d7d22404994afeb99092f7ab7036c1ff5dc9c5734f6b72e74827b9a2eb5187415014b64d0331938d8bbfec056a5ac29510655f85a68710f1c45f9dbc5fe710755caf00d0976d842703ddd4cfdeaf574ea25210bba80b505ba064f41519994b06d6196d8bab964d6c0e15f6af611b482890bed84cf27b8a6d417b5b1cf64359d0dbde4a075d3bb1f50c4b2984e5e01fcfa634e50863ca39106715c0690266abb884d04efb5995d0a9691862b078c992167f892a534bcc8502f2b924f45977542bc2ba50f377af3dc0947da503ffb95b56f56eabe453fa60d05087b115e8facf0460f911970c0be9a57bac7e588386f874ca15b404ee18e50cba240cff48b4c891a99c4165918c504b4b0688d3922807dda03b59098b4f810a60e3fae5d0ec2addc100203b7a21dc48db5a198b3027194982b81dec75c32e01aca2b9590f5aeef683b3e772424d1fd857ea99db77fcfdce81ae82380e5bc2215af5a2ccdf53f8d722cf9dbf839dd286cffea03c260d5a9d3ebba047de83b7f5b057b6fab94d9e836a3dde122d7566dc8efad05eabe5d32f269e505d60cfb0e2cb997b51321d15b065a3a45c5dab224cd9a2516a9f86f00b9bab391e17a7fd32db73b5b266a45bb5657e8a4ded674a4018ddcc1459621f5b5f521832ab273c6b78baff8af0e947000e8f9d22b2c180652ed44022d6d9c78e7146fe9b01ebf44e6a97136ae9dfce387021a2c533ba471deee501f1b3b314032c79b559fc75ad591aaac17d84086e09125158ef2ebbe079e8ccdf42769f9512cf6550f67b184209125fe14fcfd72033683dc2ba70cd4f5392c82a472aeca628b5470aae5006943ddb2cc26c007c9648cc3a3e1430baccf9e25a536b9152a363490a18c5782589230d104eb4f374108b31cb465f33e4ee11b746677c61d01754e9cac2859a3e662a3b2bb72f46bc5a30df266fe7cae5f7b4bfa0695a008a6cb703755f4d67ae06c3b1ceb86b04ea8dc6250837e5b465130b47d12216cdaecdec743eca7cc878d58bbc73f9c258b332fb3aaa59756d5d1c48b6a13fe56b0fb9ee35f49701ececd1e87aef3e428882914b5ec0425830d2955d66cc488c347bf54ec29a75ed92b9bd39d612da5a6907dfd6be7d6c2c083a3542d7c0adc037df5e6477bd36abea8e8b310199e2c73c46a3f50be1fd5d0265bdbdb690b764d5c84beb491d42b1d256d90a789ec773fcf27e50c4f7ad9c3aa152df4364ff5495075a37832644ca63b1c42cdcdc402457bb2f6463694592e6546e647e279c7fdc0cbd92434eb8bc2361e59ab470a077d650c79a07d03c05d3b917be15d5072e2c448adcdf615f7d438d0652aa616c48adf8618e87a035e2ea3934d6c2510adf785e87cb82c64f16696c5bc463fdccdb646c32372a5351f8f22f5463868cd2a5a72be3599a8a07ba451e3fe8e132f3710281c01e92d950f4ac0f38aa8acb59ddc8788
e2df798de59ff2e0470d11b7fae3190315bcc4ce86355dcfad517a22734bc89633e29d6af185445a5aefdd64b9eaa7fe06df315e0e86c6af4b90641bcb9a103d4153b0a7cff48f6b32755cafba143a400bb0debe3766b1973c7f079907a2710f5aac97a51aa11e7fb6d8a05e1835d03106c88db157285442badf9bc1eb6c3eccdffcab360c0aa295e80c45f766b57fd4026cf0608298a73b57763dfb8ca647c787de2127e8d0b67f7f2ead74107abc38c57161e768f380f87e8adfdccfe9b515caa989f66ad07745d42e71006fe5389e9b897c92682fa587e120bca7a2dac7bc01b01ecd8ec375859908f3dbc19617d6072140972d29f3dd2fe894b8cf4fb6e38c394f126b8a3b3d2e97842365ec8563fb3352f7e93bf13394c42f3324d5dfe7da59997c9b0db2b68b64911657fe8ee849de8824c1d75708d5c503cbcb6ae646d15618b16cdd43e526ea84584443b334f33d57d62add1de5419f49d562f1c38f2c25682d9752f28534adacad3fa8206e7b42735a659e5462ad22d38c61963bc67882abf87ff0110762c47dc835bfc67e1d9e0c711632668e614107bc34c33a3e8894311b7da7f637d2d162a52b4bde2cc3d7490afb82e5e473b57e92cd60e05fa107d5be8da41fd3c64c4ed2223b9cab5bf14de324d0275d9dc698390e20b111b2d07508649e4e0f23283ab42507d7316c8eb3ef2aca54e166f65966ce95decde3b25de87b65fa5907d60f6933aed5cfdc064c9d311f2d789943b8321afbc5021c1798545e7ff1ad79c490ecf97b98f36a7f8eb8fbb00000000009737aef385d9353b3940e3f7b5784e9cb74b473e05387aa12880ac67b4cb5e1f1f07cbfbf90a0751d04642a79c427293cf7e"], 0x100d) [ 2649.432926][T26849] bond1195: entered promiscuous mode [ 2649.441282][T26849] 8021q: adding VLAN 0 to HW filter on device bond1195 [ 2649.455584][T26855] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2649.499796][T26855] bond1234: entered promiscuous mode [ 2649.505780][T26855] 8021q: adding VLAN 0 to HW filter on device bond1234 [ 2649.632165][T26858] bond1195: (slave bridge1098): making interface the new active one [ 2649.642561][T26858] bridge1098: entered promiscuous mode [ 2649.658691][T26858] bond1195: (slave bridge1098): Enslaving as an active interface with an up link 19:47:23 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x64000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2649.831249][T26861] bond1234: (slave bridge1164): making interface the new active one [ 2649.845717][T26861] bridge1164: entered promiscuous mode [ 2649.866073][T26861] bond1234: (slave bridge1164): Enslaving as an active interface with an up link [ 2649.877359][T26859] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2649.891770][T26862] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2649.904161][T26863] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.2'. 
19:47:23 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x69e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:24 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, 
&(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket$nl_generic(0x10, 0x3, 0x10) [ 2649.944241][T26868] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2650.090094][T26868] bond1118: entered promiscuous mode [ 2650.096201][T26868] 8021q: adding VLAN 0 to HW filter on device bond1118 19:47:24 executing program 3: syz_emit_ethernet(0x7d, &(0x7f0000000140)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500006f00000000002f907864010100640101020420880b001500007fff4dbdc96046cbb4d762dbd54e2535734c87bf4d94bf00000800000086dd080088be0000000100000000000000080022eb000000002000800002000000000000800000000000000000008000009f8bf527a979d671984e2a5b9d0aa7f46af5ca7c6e0395255ed17c1de35dc65959d9901c07f0345ef575268723f4f32a6ea7a7a57ac5b38c95b7c53ac0a029cbd141386d8ada1f7283eb93cd6d1e6daa3620e3a3e61087ca8073759edd7661f69d3be0efe7df4bf5599b6ebfc6e68d3d7b38eb33107910210b000000000000cdf6f0f9db000000"], 0x0) r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000000), 0x4000, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000040)={0xffffffffffffffff}) ioctl$F2FS_IOC_GARBAGE_COLLECT(r1, 0x4004f506, &(0x7f0000000080)) (async) write$tun(r0, &(0x7f0000000240)=ANY=[@ANYBLOB="0003040005080600360042d40083b3aefed26535322ea6c50fba0c6f48b09e878cd5bda3ace50df91c066dc1c4c64a77f4cf8af2deba061723a93840e8cb1641cd9207733bbb34c06811cd5074c595decb3313e549bd0dc4b74a8814e923db2226f764c285d67e16d049738ffa51ae00cbf412ecca6522bbf77b076d088c35fc9d87d87b6d4fbbb79b6e516677706ec4d513f9621b74577abe6d7123640761f29157f6a4c658c8cd21c1b4ab2f49df126e0695d8ee99d070bff73813b22b8cfa6c394de4702a0e089e9aa5313dee47d7cac0328948b23fafb3e7bb3bd1916756b9b8e0a2aae1acf4798c8aee0954c171b8330e9e241a08e5d8c294f61ff55fdd8a0d37022d199f6a0d81bf1b12add7d975c9b461faeedd683db2a5d062a70300eedf06767f6e73f2f274123e309874050686ce45b2e1cddac7ac0fd167e25660e8fc196b288e20d6645150fca0429cb269167521fa7f97fcc2495dc7194fba32553e70db2f3c93dbb8e5ea7a7294a7fb0af197ce52e4b934ff059a6062df01b52b037d8bbe61d9da1a4e2ebdcdc07088acf327d17298f3de4395db365043fa92aaa2b090794d62febee90349c6baa6370ce3539774f93b66cac59a24df3865eccedb86f33a71d08a3cef5f521575296e051016c5e3197ad080654c8900043a4f0f4dd3174c53f0b738925cde8c68b7b75a0b9d3c0f7dcb0750eb0131be00b01dfb359ff1d9329bf4110e3c0e10be92081f46525059cd52d16757114591707fbfa0fa02e3823b5e3cb2ad37492b469e48912293ea335eca8b82f750cfe6d831c6bd966508f1d969f26cd55cff90f955d1fc9797ab75c5695be7ebdad40ae271dc5bf1724c1562144c85060de44accfeb905bb73a20ad501a296c554322b101b33d0c79544b35fddd63b6dc65f26ac57f685b6f55c46c478fd83538f1789e3023d1ab467638da6585cf010e550fce622c5c5bd88fa4e87121a4947feec6ca1dccc2f70a6d86ef30f0bb03e5495758e3930e1e1c851585d2cc705445d68df4b72c6f6dd8b84149ed1163ed2cbc3f96ec80f6208b0ee8ba4ceae24aab5c0e6ed5a480e51b06495d74aecc90496307a9d131f24900d8d8baaa35bec4921ef3a9db9fe416c2bc27b68d5f16c0d884e33d947497fd2844a95c2eb12246ee908db939c580072c43dcbdf5244170ce65923a74ae9f9cc348923fabf41b500cb7bf5efd6dfde3499c4c5143077dca606ec81e2bce6a510526c3bf3cb52fe2bd6708c2b50ec990c31f3117dc7f417a80485cba01622ba9f0d48268139a5824a3e2e427d0d40f29f221eeb679bdd4fd44f2f5a26956c5490d561e225a31ba54ed7fea0b71978033599b736e263caff33c17081139de7bec73ff8bd27270f114b9eb775774465335f6c86b23237471633012a294fd41fba1daffd1c
077d14ab1a081c07f30073b213992a3ceda28d9828f37b7a8d25672c62e9255dd03e15b264297669df8ae1fbab8448ff5b34df4a4c9c3664640bbe0e57310edcf6385656b89689526e28579752ead19e857924a4814c7571924af7639daed3bb96e7a22f193da45ad4c5461e98bd0ebcbfefd04ff20fdb3a87f8f6263084f39117e598cfedeb975c01a15ce1a8f3d5a8992f7eff367402b62b73e20e2df66552db084aeb6778ea5efd66d3b81fa8fe218f73a57adf4acf28974e0ad7894a97e455c282c829a95912cb03f15dc6dcebc4f7712821b23322cb9e701e3377d8a38c3885f9905466ed819c476dae530bf3589bfd637cd8aac5640700000029bcb34b2b30ba9aa54b8b0d2b8defd321803ca475636e24916a040229b9c6303b0de8b14e08ebd496155f1d575954d8d23b57186ddcef5307f99f04a30f6bc2c3fcdda52b223923a7b47b82d72a019b9adc45adad0c7728217c90f71b7e8b60474175756c349dce59413363cbdb5949140244ab0583b1a7c64ab8426df3ead72c2648b85f1adb1a71e14979f385396252f3ff5b660851a5583b1f71a44a212f65f3889793ac54ece93e509553837354d26ac144a4202056d1c19c92a8e6609feda9cbf9e0d128a5465b3fbfff772d3e714d0fab15cc9e29ce162b3cceb2a902b03318de90497054f823bf4e1c8a0a2a656be921542c9ca294d9318d97bf010cbb9b3274914080adbb06ec42720d5f683e48a71be608808fe9d5a5b2a398e17fa9826361479009bcfc8f0abb1ba1bdfccaf243ca1519071f96069673ba3e181b17cbf87f407cee3af2be6f16480843cb2f49af0fdc72386d25d2ddcb56f6e41f4a98f84d2bc1b891303d8484f88e319ef9727d6318f9ccc5c2d5bcc0883208bb32395f15e91b3515d13cb397cd4fee955b7525a3ec3e6528b7a846ad84b194919361cff694bc5e0a3ce18e8e99fbe2731936910e11b850d5395eafe3df4ae71103a626582c474f3994761c7057ef05f85dfb658ae8b1211ab42195c7b7f59e5dc364df2791ad9c8c1466732cdd2b9451f686dcb20e0e49a6c8fad9eac1ff993720cabdc5299e18041b7f382ef46b7821b4babdc2e837bb5ff78a2e84917ce5e8b3d52dfcdd69d1aa3843e771b594f5a85b95465ad370b2bcef1f5ba92c3ad534d56e7f7c4f0eaa5b85838733ba28ed6b270a23947ca3f9c6e5b8233c8312246c87e394da40970ddeb8b1df095845c905a0863c6db199e649fc3540ae3ebfbf6cace124cc6b04ce63da21f3141add0bdbf68a51cdf2d86f3c0f704156a73912b292381a51c626620ddc322a1dd8864d2e6ed90e7c020be58087cdd9a90d255b2ab58facfd42af86a9166dc17bbf3994fb1448512cdbddef15834db98a6442c6321fa5b3c5a4c4cb92fcfe9b884df157f051c3c04530e353f4c734d71777ff5af9457e1e705e62fb32e5198f2c6d35546ca2e20f62e549f5e9c6b711e015540a65cb7fb49e9411dd01ba37492efcbeba5636c38142c97c92a0c0637fb5245e97a6dc7374d6a443c4aadfaf6d773436643aa0be8fd3a9ecb13f35b7111ff2bc34a9075a7fb4d4fc50b5cdd2fb9549e37b62b52efe7be3c026f7b35d6382a13ac5738804e884feba55e2d907f05b4ffbbc895fefac39e67223646a7195ae5332f922a6ef09924ae01362fb7e868a5fea0442fb4148f6c3ff9388dfbafe8113bda697c76626393ebb9d15be0b52d2383ec4559c32f2c4f70f74497fd32d4aaec7f4c635745e3b4710f3c05237e486d953d821a74449a0040aec833a42ac203ae183eb5120b954028fee8fd656c2711259727430d68d2f28ae37349095fc6f7755475812c83a3caa35838960e32769d01b4d6861310a9c4fb52c92234ffe01239a1c7ebb15eb13f06508a706338f5de1b8b08d2db82582e5edea8da5b89a2e76cd6d85055e84b871c1f1cdbe6de6058a6adee964c41c67ec905c483ffc578c112ecc2f37e22bdce5b46a5f3a9bcf181797e32108ce5fbbf5b076450134b01c95252f91f8de6cfb4890cea8e8f2a450d6409d212dc983863543a6f81a1fbb0a9c83578b25901c888ed1dd4c39bec456c7ddc3471e03d4d7d22404994afeb99092f7ab7036c1ff5dc9c5734f6b72e74827b9a2eb5187415014b64d0331938d8bbfec056a5ac29510655f85a68710f1c45f9dbc5fe710755caf00d0976d842703ddd4cfdeaf574ea25210bba80b505ba064f41519994b06d6196d8bab964d6c0e15f6af611b482890bed84cf27b8a6d417b5b1cf64359d0dbde4a075d3bb1f50c4b2984e5e01fcfa634e50863ca39106715c0690266abb884d04efb5995d0a9691862b078c992167f892a534bcc8502f2b924f45977542bc2ba50f377af3dc0947da503ffb95b56f56eabe453fa60d05087b115e8facf0460f911970c0be9a57bac7e588386f874ca15b404ee18e50cba240cff48b4c891a99c4165918c504b4b0688d3922807dda03b59098b4f810a60e3fae5d0ec2addc1002
03b7a21dc48db5a198b3027194982b81dec75c32e01aca2b9590f5aeef683b3e772424d1fd857ea99db77fcfdce81ae82380e5bc2215af5a2ccdf53f8d722cf9dbf839dd286cffea03c260d5a9d3ebba047de83b7f5b057b6fab94d9e836a3dde122d7566dc8efad05eabe5d32f269e505d60cfb0e2cb997b51321d15b065a3a45c5dab224cd9a2516a9f86f00b9bab391e17a7fd32db73b5b266a45bb5657e8a4ded674a4018ddcc1459621f5b5f521832ab273c6b78baff8af0e947000e8f9d22b2c180652ed44022d6d9c78e7146fe9b01ebf44e6a97136ae9dfce387021a2c533ba471deee501f1b3b314032c79b559fc75ad591aaac17d84086e09125158ef2ebbe079e8ccdf42769f9512cf6550f67b184209125fe14fcfd72033683dc2ba70cd4f5392c82a472aeca628b5470aae5006943ddb2cc26c007c9648cc3a3e1430baccf9e25a536b9152a363490a18c5782589230d104eb4f374108b31cb465f33e4ee11b746677c61d01754e9cac2859a3e662a3b2bb72f46bc5a30df266fe7cae5f7b4bfa0695a008a6cb703755f4d67ae06c3b1ceb86b04ea8dc6250837e5b465130b47d12216cdaecdec743eca7cc878d58bbc73f9c258b332fb3aaa59756d5d1c48b6a13fe56b0fb9ee35f49701ececd1e87aef3e428882914b5ec0425830d2955d66cc488c347bf54ec29a75ed92b9bd39d612da5a6907dfd6be7d6c2c083a3542d7c0adc037df5e6477bd36abea8e8b310199e2c73c46a3f50be1fd5d0265bdbdb690b764d5c84beb491d42b1d256d90a789ec773fcf27e50c4f7ad9c3aa152df4364ff5495075a37832644ca63b1c42cdcdc402457bb2f6463694592e6546e647e279c7fdc0cbd92434eb8bc2361e59ab470a077d650c79a07d03c05d3b917be15d5072e2c448adcdf615f7d438d0652aa616c48adf8618e87a035e2ea3934d6c2510adf785e87cb82c64f16696c5bc463fdccdb646c32372a5351f8f22f5463868cd2a5a72be3599a8a07ba451e3fe8e132f3710281c01e92d950f4ac0f38aa8acb59ddc8788e2df798de59ff2e0470d11b7fae3190315bcc4ce86355dcfad517a22734bc89633e29d6af185445a5aefdd64b9eaa7fe06df315e0e86c6af4b90641bcb9a103d4153b0a7cff48f6b32755cafba143a400bb0debe3766b1973c7f079907a2710f5aac97a51aa11e7fb6d8a05e1835d03106c88db157285442badf9bc1eb6c3eccdffcab360c0aa295e80c45f766b57fd4026cf0608298a73b57763dfb8ca647c787de2127e8d0b67f7f2ead74107abc38c57161e768f380f87e8adfdccfe9b515caa989f66ad07745d42e71006fe5389e9b897c92682fa587e120bca7a2dac7bc01b01ecd8ec375859908f3dbc19617d6072140972d29f3dd2fe894b8cf4fb6e38c394f126b8a3b3d2e97842365ec8563fb3352f7e93bf13394c42f3324d5dfe7da59997c9b0db2b68b64911657fe8ee849de8824c1d75708d5c503cbcb6ae646d15618b16cdd43e526ea84584443b334f33d57d62add1de5419f49d562f1c38f2c25682d9752f28534adacad3fa8206e7b42735a659e5462ad22d38c61963bc67882abf87ff0110762c47dc835bfc67e1d9e0c711632668e614107bc34c33a3e8894311b7da7f637d2d162a52b4bde2cc3d7490afb82e5e473b57e92cd60e05fa107d5be8da41fd3c64c4ed2223b9cab5bf14de324d0275d9dc698390e20b111b2d07508649e4e0f23283ab42507d7316c8eb3ef2aca54e166f65966ce95decde3b25de87b65fa5907d60f6933aed5cfdc064c9d311f2d789943b8321afbc5021c1798545e7ff1ad79c490ecf97b98f36a7f8eb8fbb00000000009737aef385d9353b3940e3f7b5784e9cb74b473e05387aa12880ac67b4cb5e1f1f07cbfbf90a0751d04642a79c427293cf7e"], 0x100d) [ 2650.151250][T26870] bond1118: (slave bridge1048): making interface the new active one [ 2650.159483][T26870] bridge1048: entered promiscuous mode [ 2650.177895][T26870] bond1118: (slave bridge1048): Enslaving as an active interface with an up link [ 2650.191482][T26880] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:47:24 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x80110000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:24 executing program 0: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) connect$inet6(r1, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000000), 0x4) setsockopt$inet6_tcp_TLS_TX(r1, 0x11a, 0x2, &(0x7f0000000180)=@ccm_128={{0x303}, "15c1c3b61233bb0b", "dd060a022aefe3121a45ed6d124267d5", "9638f6d8", "7592701356c7bfa6"}, 0x28) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000080)=0x100000001, 0x4) connect$inet6(r0, &(0x7f0000000200)={0xa, 0x0, 0x0, @loopback}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r0, 0x6, 0x1f, &(0x7f0000000540), 0x4) setsockopt$inet6_tcp_TLS_TX(r0, 0x11a, 0x300, 0x0, 0x0) setsockopt$inet6_tcp_TLS_TX(0xffffffffffffffff, 0x6, 0x1, &(0x7f00000000c0)=@ccm_128={{0x304}, "55f0db1bdbd14dd4", "51cc955b6636b2415393114f9fc92f97", "96b7452c", "e1923e36ba797131"}, 0x28) socket$inet6_tcp(0xa, 0x1, 0x0) (async) socket$inet6_tcp(0xa, 0x1, 0x0) (async) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) (async) connect$inet6(r1, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) (async) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000000), 0x4) (async) setsockopt$inet6_tcp_TLS_TX(r1, 0x11a, 0x2, &(0x7f0000000180)=@ccm_128={{0x303}, "15c1c3b61233bb0b", "dd060a022aefe3121a45ed6d124267d5", "9638f6d8", "7592701356c7bfa6"}, 0x28) (async) setsockopt$inet6_tcp_int(r1, 0x6, 0x13, &(0x7f0000000080)=0x100000001, 0x4) (async) connect$inet6(r0, &(0x7f0000000200)={0xa, 0x0, 0x0, @loopback}, 0x1c) (async) setsockopt$inet6_tcp_TCP_ULP(r0, 0x6, 0x1f, &(0x7f0000000540), 0x4) (async) setsockopt$inet6_tcp_TLS_TX(r0, 0x11a, 0x300, 0x0, 0x0) (async) setsockopt$inet6_tcp_TLS_TX(0xffffffffffffffff, 0x6, 0x1, &(0x7f00000000c0)=@ccm_128={{0x304}, "55f0db1bdbd14dd4", "51cc955b6636b2415393114f9fc92f97", "96b7452c", "e1923e36ba797131"}, 0x28) (async) [ 2650.371127][T26880] bond1196: entered promiscuous mode [ 2650.379781][T26880] 8021q: adding VLAN 0 to HW filter on device bond1196 [ 2650.471134][T26881] bond1196: (slave bridge1099): making interface the new active one [ 2650.479677][T26881] bridge1099: entered promiscuous mode [ 2650.501217][T26881] bond1196: (slave bridge1099): Enslaving as an active interface with an up link 19:47:24 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) 
sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x64150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2650.541701][T26885] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2650.573861][T26888] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. 19:47:24 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 
0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) [ 2650.621012][T26886] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2650.710256][T26886] bond1235: entered promiscuous mode [ 2650.716332][T26886] 8021q: adding VLAN 0 to HW filter on device bond1235 [ 2650.736889][T26889] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.2'. 19:47:24 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 
0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) [ 2650.971694][T26890] bond1235: (slave bridge1165): making interface the new active one [ 2651.002201][T26890] bridge1165: entered promiscuous mode 19:47:25 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6ae70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2651.032490][T26890] bond1235: (slave bridge1165): Enslaving as an active interface with an up link [ 2651.044871][T26895] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:25 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) r3 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000040), 0xffffffffffffffff) sendmsg$IPVS_CMD_GET_CONFIG(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)={0x68, r3, 0x20, 0x70bd28, 0x25dfdbfc, {}, [@IPVS_CMD_ATTR_DEST={0x28, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ADDR={0x14, 0x1, @ipv4=@dev={0xac, 0x14, 0x14, 0x3d}}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e22}, @IPVS_DEST_ATTR_TUN_FLAGS={0x6}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x40}, @IPVS_CMD_ATTR_SERVICE={0x24, 0x1, 0x0, 0x1, [@IPVS_SVC_ATTR_AF={0x6, 0x1, 0xa}, @IPVS_SVC_ATTR_PORT={0x6, 0x4, 0x4e24}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0x400}, @IPVS_SVC_ATTR_TIMEOUT={0x8}]}]}, 0x68}, 0x1, 0x0, 0x0, 0x40}, 0x0) sendmsg$BATADV_CMD_GET_VLAN(r0, &(0x7f0000000540)={&(0x7f0000000480)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000500)={&(0x7f00000004c0)={0x34, 0x0, 0x400, 0x70bd26, 0x25dfdbff, {}, [@BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x2}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x2}, @BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}]}, 0x34}, 0x1, 0x0, 0x0, 0x1}, 0x20000000) syz_emit_ethernet(0x6a, &(0x7f0000000180)={@link_local, @random="8a061b827e90", @val={@void, {0x8100, 0x4, 0x1}}, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) r4 = socket$nl_generic(0x10, 0x3, 0x10) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) 
sendmsg$IPCTNL_MSG_CT_GET_DYING(0xffffffffffffffff, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)={0x14, 0x6, 0x1, 0x5, 0x0, 0x0, {0x7, 0x0, 0x5}, [""]}, 0x14}}, 0x40080) r7 = syz_genetlink_get_family_id$batadv(&(0x7f0000000240), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(r4, &(0x7f0000000340)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x44, r7, 0x8, 0x70bd2c, 0x25dfdbfd, {}, [@BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5}, @BATADV_ATTR_NETWORK_CODING_ENABLED={0x5}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x1}, @BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5, 0x37, 0x1}, @BATADV_ATTR_HARD_IFINDEX={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x81}, 0x20044005) [ 2651.184531][T26895] bond1119: entered promiscuous mode [ 2651.191049][T26895] 8021q: adding VLAN 0 to HW filter on device bond1119 [ 2651.277144][T26899] bond1119: (slave bridge1049): making interface the new active one [ 2651.286502][T26899] bridge1049: entered promiscuous mode [ 2651.301655][T26899] bond1119: (slave bridge1049): Enslaving as an active interface with an up link 19:47:25 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x81000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2651.323550][T26909] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2651.452366][T26909] bond1197: entered promiscuous mode [ 2651.467080][T26909] 8021q: adding VLAN 0 to HW filter on device bond1197 [ 2651.555787][T26911] bond1197: (slave bridge1100): making interface the new active one [ 2651.566010][T26911] bridge1100: entered promiscuous mode [ 2651.589558][T26911] bond1197: (slave bridge1100): Enslaving as an active interface with an up link 19:47:25 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x65150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2651.600142][T26914] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.0'. 
[ 2651.627303][T26915] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2651.651724][T26918] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.0'. 19:47:25 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) 19:47:25 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) 
ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) [ 2651.679752][T26917] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2651.698326][T26928] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:25 executing program 3: socket$nl_generic(0x10, 0x3, 0x10) (async) r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) r3 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000040), 0xffffffffffffffff) sendmsg$IPVS_CMD_GET_CONFIG(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)={0x68, r3, 0x20, 0x70bd28, 0x25dfdbfc, {}, [@IPVS_CMD_ATTR_DEST={0x28, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ADDR={0x14, 0x1, @ipv4=@dev={0xac, 0x14, 0x14, 0x3d}}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e22}, @IPVS_DEST_ATTR_TUN_FLAGS={0x6}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x40}, @IPVS_CMD_ATTR_SERVICE={0x24, 0x1, 0x0, 0x1, [@IPVS_SVC_ATTR_AF={0x6, 0x1, 0xa}, @IPVS_SVC_ATTR_PORT={0x6, 0x4, 0x4e24}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0x400}, @IPVS_SVC_ATTR_TIMEOUT={0x8}]}]}, 0x68}, 0x1, 0x0, 0x0, 0x40}, 0x0) sendmsg$BATADV_CMD_GET_VLAN(r0, &(0x7f0000000540)={&(0x7f0000000480)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000500)={&(0x7f00000004c0)={0x34, 0x0, 0x400, 0x70bd26, 0x25dfdbff, {}, [@BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x2}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x2}, @BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}]}, 0x34}, 0x1, 0x0, 0x0, 0x1}, 0x20000000) syz_emit_ethernet(0x6a, &(0x7f0000000180)={@link_local, @random="8a061b827e90", @val={@void, {0x8100, 0x4, 0x1}}, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) socket$nl_generic(0x10, 0x3, 0x10) (async) r4 = socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendmsg$IPCTNL_MSG_CT_GET_DYING(0xffffffffffffffff, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)={0x14, 0x6, 0x1, 0x5, 0x0, 0x0, {0x7, 0x0, 0x5}, [""]}, 0x14}}, 0x40080) (async) sendmsg$IPCTNL_MSG_CT_GET_DYING(0xffffffffffffffff, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)={0x14, 0x6, 0x1, 0x5, 0x0, 0x0, {0x7, 0x0, 0x5}, [""]}, 0x14}}, 0x40080) r7 = syz_genetlink_get_family_id$batadv(&(0x7f0000000240), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(r4, &(0x7f0000000340)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x44, r7, 0x8, 0x70bd2c, 0x25dfdbfd, {}, [@BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5}, @BATADV_ATTR_NETWORK_CODING_ENABLED={0x5}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x1}, @BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5, 0x37, 0x1}, @BATADV_ATTR_HARD_IFINDEX={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x81}, 0x20044005) [ 2651.805806][T26928] bond1236: entered promiscuous mode [ 2651.812500][T26928] 8021q: 
adding VLAN 0 to HW filter on device bond1236 [ 2651.946092][T26931] bond1236: (slave bridge1166): making interface the new active one [ 2651.975199][T26931] bridge1166: entered promiscuous mode [ 2652.006223][T26931] bond1236: (slave bridge1166): Enslaving as an active interface with an up link 19:47:26 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6be70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2652.132241][T26936] bond1120: entered promiscuous mode [ 2652.139025][T26936] 8021q: adding VLAN 0 to HW filter on device bond1120 19:47:26 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x88a8ffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2652.272813][T26937] bond1120: (slave bridge1050): making interface the new active one [ 2652.285173][T26937] bridge1050: entered promiscuous mode [ 2652.304574][T26937] bond1120: (slave bridge1050): Enslaving as an active interface with an up link [ 2652.421727][T26939] bond1198: entered promiscuous mode [ 2652.438094][T26939] 8021q: adding VLAN 0 to HW filter on device bond1198 [ 2652.531371][T26941] bond1198: (slave bridge1101): making interface the new active one [ 2652.539559][T26941] bridge1101: entered promiscuous mode [ 2652.549890][T26941] bond1198: (slave bridge1101): Enslaving as an active interface with an up link 19:47:26 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) r3 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000040), 0xffffffffffffffff) sendmsg$IPVS_CMD_GET_CONFIG(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)={0x68, r3, 0x20, 0x70bd28, 0x25dfdbfc, {}, [@IPVS_CMD_ATTR_DEST={0x28, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ADDR={0x14, 0x1, @ipv4=@dev={0xac, 0x14, 0x14, 
0x3d}}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e22}, @IPVS_DEST_ATTR_TUN_FLAGS={0x6}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x40}, @IPVS_CMD_ATTR_SERVICE={0x24, 0x1, 0x0, 0x1, [@IPVS_SVC_ATTR_AF={0x6, 0x1, 0xa}, @IPVS_SVC_ATTR_PORT={0x6, 0x4, 0x4e24}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0x400}, @IPVS_SVC_ATTR_TIMEOUT={0x8}]}]}, 0x68}, 0x1, 0x0, 0x0, 0x40}, 0x0) sendmsg$BATADV_CMD_GET_VLAN(r0, &(0x7f0000000540)={&(0x7f0000000480)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000500)={&(0x7f00000004c0)={0x34, 0x0, 0x400, 0x70bd26, 0x25dfdbff, {}, [@BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x2}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x2}, @BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}]}, 0x34}, 0x1, 0x0, 0x0, 0x1}, 0x20000000) syz_emit_ethernet(0x6a, &(0x7f0000000180)={@link_local, @random="8a061b827e90", @val={@void, {0x8100, 0x4, 0x1}}, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) r4 = socket$nl_generic(0x10, 0x3, 0x10) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) sendmsg$IPCTNL_MSG_CT_GET_DYING(0xffffffffffffffff, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)={0x14, 0x6, 0x1, 0x5, 0x0, 0x0, {0x7, 0x0, 0x5}, [""]}, 0x14}}, 0x40080) r7 = syz_genetlink_get_family_id$batadv(&(0x7f0000000240), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(r4, &(0x7f0000000340)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x44, r7, 0x8, 0x70bd2c, 0x25dfdbfd, {}, [@BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5}, @BATADV_ATTR_NETWORK_CODING_ENABLED={0x5}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x1}, @BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5, 0x37, 0x1}, @BATADV_ATTR_HARD_IFINDEX={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x81}, 0x20044005) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async) syz_genetlink_get_family_id$ipvs(&(0x7f0000000040), 0xffffffffffffffff) (async) sendmsg$IPVS_CMD_GET_CONFIG(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)={0x68, r3, 0x20, 0x70bd28, 0x25dfdbfc, {}, [@IPVS_CMD_ATTR_DEST={0x28, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ADDR={0x14, 0x1, @ipv4=@dev={0xac, 0x14, 0x14, 0x3d}}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e22}, @IPVS_DEST_ATTR_TUN_FLAGS={0x6}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x40}, @IPVS_CMD_ATTR_SERVICE={0x24, 0x1, 0x0, 0x1, [@IPVS_SVC_ATTR_AF={0x6, 0x1, 0xa}, @IPVS_SVC_ATTR_PORT={0x6, 0x4, 0x4e24}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0x400}, @IPVS_SVC_ATTR_TIMEOUT={0x8}]}]}, 0x68}, 0x1, 0x0, 0x0, 0x40}, 0x0) (async) sendmsg$BATADV_CMD_GET_VLAN(r0, &(0x7f0000000540)={&(0x7f0000000480)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000500)={&(0x7f00000004c0)={0x34, 0x0, 0x400, 0x70bd26, 0x25dfdbff, {}, [@BATADV_ATTR_HOP_PENALTY={0x5, 
0x35, 0x2}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x2}, @BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}]}, 0x34}, 0x1, 0x0, 0x0, 0x1}, 0x20000000) (async) syz_emit_ethernet(0x6a, &(0x7f0000000180)={@link_local, @random="8a061b827e90", @val={@void, {0x8100, 0x4, 0x1}}, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r4, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r4, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r5, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r6}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$IPCTNL_MSG_CT_GET_DYING(0xffffffffffffffff, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)={0x14, 0x6, 0x1, 0x5, 0x0, 0x0, {0x7, 0x0, 0x5}, [""]}, 0x14}}, 0x40080) (async) syz_genetlink_get_family_id$batadv(&(0x7f0000000240), 0xffffffffffffffff) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(r4, &(0x7f0000000340)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x44, r7, 0x8, 0x70bd2c, 0x25dfdbfd, {}, [@BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5}, @BATADV_ATTR_NETWORK_CODING_ENABLED={0x5}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x1}, @BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED={0x5, 0x37, 0x1}, @BATADV_ATTR_HARD_IFINDEX={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x81}, 0x20044005) (async) 19:47:26 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x65580000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:26 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", 
@ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) 19:47:26 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$tipc(&(0x7f00000001c0), 0xffffffffffffffff) [ 2652.889484][T26960] bond1237: entered promiscuous mode [ 2652.896144][T26960] 8021q: adding VLAN 0 to HW filter on device bond1237 [ 2653.057962][T26963] bond1237: (slave bridge1167): making interface the new active one [ 2653.072618][T26963] bridge1167: entered promiscuous mode [ 2653.095838][T26963] bond1237: (slave bridge1167): Enslaving as an active interface with an up link 19:47:27 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6c000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2653.214200][T26966] bond1121: entered promiscuous mode [ 2653.227614][T26966] 8021q: adding VLAN 0 to HW filter on device bond1121 [ 2653.331288][T26967] bond1121: (slave bridge1051): making interface the new active one [ 2653.339893][T26967] bridge1051: entered promiscuous mode [ 2653.354395][T26967] bond1121: (slave bridge1051): Enslaving as an active interface with an up link 19:47:27 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8a600000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2653.381788][T26973] validate_nla: 4 callbacks suppressed [ 2653.381843][T26973] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:27 executing program 3: recvmsg(0xffffffffffffffff, &(0x7f0000000200)={&(0x7f0000000000), 0x80, &(0x7f00000001c0)=[{&(0x7f0000000240)=""/65, 0x41}, {&(0x7f00000000c0)=""/202, 0xca}], 0x2}, 0x1) r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$gtp(&(0x7f00000001c0), r3) sendmsg$GTP_CMD_GETPDP(r3, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000200)={0x14, r4, 0xb01}, 0x14}}, 0x0) sendmsg$GTP_CMD_GETPDP(r0, &(0x7f0000000340)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000300)={&(0x7f00000002c0)={0x14, r4, 0x4, 0x70bd2b, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x40080}, 0x40448c4) mmap(&(0x7f0000fff000/0x1000)=nil, 0x1000, 0x3000008, 0x30, 0xffffffffffffffff, 0x7d296000) [ 2653.559986][T26973] bond1199: entered promiscuous mode [ 2653.574696][T26973] 8021q: adding VLAN 0 to HW filter on device bond1199 [ 2653.707795][T26985] bond1199: (slave bridge1102): making interface the new active one [ 2653.736014][T26985] bridge1102: entered promiscuous mode 19:47:27 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x66150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:27 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 
0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket$nl_generic(0x10, 0x3, 0x10) [ 2653.758164][T26985] bond1199: (slave bridge1102): Enslaving as an active interface with an up link 19:47:27 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r3, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r3, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r3, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r4 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r4, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r4, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 
0x0, 0x4001}, 0x4046) sendmsg$nl_route(r2, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r6, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r7, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r7, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) [ 2653.863667][T26994] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2654.018186][T26994] bond1238: entered promiscuous mode [ 2654.041576][T26994] 8021q: adding VLAN 0 to HW filter on device bond1238 19:47:28 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6ce70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2654.145325][T26995] bond1238: (slave bridge1168): making interface the new active one [ 2654.164271][T26995] bridge1168: entered promiscuous mode [ 2654.178349][T26995] bond1238: (slave bridge1168): Enslaving as an active interface with an up link [ 2654.234686][T26998] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2654.336433][T26998] bond1122: entered promiscuous mode [ 2654.354152][T26998] 8021q: adding VLAN 0 to HW filter on device bond1122 [ 2654.465766][T27001] bond1122: (slave bridge1052): making interface the new active one [ 2654.476636][T27001] bridge1052: entered promiscuous mode [ 2654.498015][T27001] bond1122: (slave bridge1052): Enslaving as an active interface with an up link 19:47:28 executing program 3: recvmsg(0xffffffffffffffff, &(0x7f0000000200)={&(0x7f0000000000), 0x80, &(0x7f00000001c0)=[{&(0x7f0000000240)=""/65, 0x41}, {&(0x7f00000000c0)=""/202, 0xca}], 0x2}, 0x1) (async) r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async, rerun: 32) r3 = socket$nl_generic(0x10, 0x3, 0x10) (rerun: 32) r4 = syz_genetlink_get_family_id$gtp(&(0x7f00000001c0), r3) sendmsg$GTP_CMD_GETPDP(r3, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000200)={0x14, r4, 0xb01}, 0x14}}, 0x0) sendmsg$GTP_CMD_GETPDP(r0, &(0x7f0000000340)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000300)={&(0x7f00000002c0)={0x14, r4, 0x4, 0x70bd2b, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x40080}, 0x40448c4) (async) mmap(&(0x7f0000fff000/0x1000)=nil, 0x1000, 0x3000008, 0x30, 0xffffffffffffffff, 0x7d296000) 19:47:28 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x9effffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2654.553512][T27009] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2654.671264][T27009] bond1200: entered promiscuous mode [ 2654.695452][T27009] 8021q: adding VLAN 0 to HW filter on device bond1200 19:47:28 executing program 3: recvmsg(0xffffffffffffffff, &(0x7f0000000200)={&(0x7f0000000000), 0x80, &(0x7f00000001c0)=[{&(0x7f0000000240)=""/65, 0x41}, {&(0x7f00000000c0)=""/202, 0xca}], 0x2}, 0x1) r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$gtp(&(0x7f00000001c0), r3) sendmsg$GTP_CMD_GETPDP(r3, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000200)={0x14, r4, 0xb01}, 0x14}}, 0x0) sendmsg$GTP_CMD_GETPDP(r0, &(0x7f0000000340)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000300)={&(0x7f00000002c0)={0x14, r4, 0x4, 0x70bd2b, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x40080}, 0x40448c4) mmap(&(0x7f0000fff000/0x1000)=nil, 0x1000, 0x3000008, 0x30, 0xffffffffffffffff, 0x7d296000) recvmsg(0xffffffffffffffff, &(0x7f0000000200)={&(0x7f0000000000), 0x80, &(0x7f00000001c0)=[{&(0x7f0000000240)=""/65, 0x41}, {&(0x7f00000000c0)=""/202, 0xca}], 0x2}, 0x1) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$gtp(&(0x7f00000001c0), r3) (async) sendmsg$GTP_CMD_GETPDP(r3, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000200)={0x14, r4, 0xb01}, 0x14}}, 0x0) (async) sendmsg$GTP_CMD_GETPDP(r0, &(0x7f0000000340)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000300)={&(0x7f00000002c0)={0x14, r4, 0x4, 0x70bd2b, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x40080}, 0x40448c4) (async) mmap(&(0x7f0000fff000/0x1000)=nil, 0x1000, 0x3000008, 0x30, 0xffffffffffffffff, 0x7d296000) (async) [ 2654.890605][T27011] bond1200: (slave bridge1103): making interface the new active one [ 2654.900101][T27011] bridge1103: entered promiscuous mode [ 2654.923765][T27011] bond1200: (slave bridge1103): Enslaving as an active interface with an up link 19:47:29 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x67150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2654.939435][T27008] __nla_validate_parse: 14 callbacks 
suppressed [ 2654.939464][T27008] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2654.978758][T27012] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2654.996988][T27013] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2655.021012][T27014] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2655.039741][T27015] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.0'. 19:47:29 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r3, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r3, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r3, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r4 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r4, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r4, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r2, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r6, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r7, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) 19:47:29 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = 
syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) [ 2655.073043][T27019] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2655.146377][T27019] workqueue: Failed to create a rescuer kthread for wq "bond1239": -EINTR [ 2655.479292][T27026] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:47:29 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6de70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:29 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xa1ffffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:29 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) r3 = socket$netlink(0x10, 0x3, 0x2) sendmsg$DEVLINK_CMD_RATE_GET(r3, &(0x7f0000000280)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000240)={&(0x7f0000000140)={0xc4, 0x0, 0x800, 0x70bd2c, 0x25dfdbff, {}, [@DEVLINK_ATTR_RATE_NODE_NAME={0xf, 0xa8, @name2}, @DEVLINK_ATTR_RATE_NODE_NAME={0x9d, 0xa8, @random="61bfea00f646684911bce065547facc042b459b38486ef37beaff2e4505e1bc1ed41a5f75a7a3a815daab3ebd1d716c8029773000c4222a2513495e1b63c85345672f019115c5a9a5a9d1f5d1c94c73817739f1289e0a524fa79ab95cc3ac3789bfd5ffb9871c0ec4d92f4057244ca9b4d090b0d70a0a00870c2852de3c7b767fa93cd50f9ee39e33e0955ef94072de1968181076a04991948"}]}, 0xc4}, 0x1, 0x0, 0x0, 0x4000}, 0x40) sendmsg$ETHTOOL_MSG_LINKSTATE_GET(r0, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x34, 0x0, 0x8, 0x70bd26, 0x25dfdbff, {}, [@HEADER={0x20, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_INDEX={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'gretap0\x00'}]}]}, 0x34}, 0x1, 0x0, 0x0, 0x4}, 0x54) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) [ 2655.530926][T27026] workqueue: Failed to create a rescuer kthread for wq "bond1123": -EINTR [ 2655.967354][T27038] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2656.034606][T27038] workqueue: Failed to create a rescuer kthread for wq "bond1201": -EINTR [ 2656.179190][T27044] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2656.203023][T27047] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2656.219910][T27048] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.0'. 19:47:30 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r5, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) 19:47:30 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x68000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2656.307696][T27049] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2656.328987][T27054] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:30 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) 19:47:30 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, 
&(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) 19:47:30 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket(0x1, 0x803, 0x0) 19:47:30 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 
0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) [ 2656.593573][T27054] bond1239: entered promiscuous mode [ 2656.608206][T27054] 8021q: adding VLAN 0 to HW filter on device bond1239 19:47:30 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6ee70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2656.635028][T27057] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:47:30 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) socket(0x1, 0x803, 0x0) [ 2656.839356][T27057] bond1123: entered promiscuous mode [ 2656.854803][T27057] 8021q: adding VLAN 0 to HW filter on device bond1123 19:47:31 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) [ 2657.048164][T27060] bond1123: (slave bridge1053): making interface the new active one [ 2657.082066][ T1232] ieee802154 phy0 wpan0: encryption failed: -22 [ 2657.089091][ T1232] ieee802154 phy1 wpan1: encryption failed: -22 19:47:31 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, 
&(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r1, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r1, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r2 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r2, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r2, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) [ 2657.111181][T27060] bridge1053: entered promiscuous mode [ 2657.140305][T27060] bond1123: (slave bridge1053): Enslaving as an active interface with an up link 19:47:31 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xb0000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:31 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async, rerun: 32) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) (rerun: 32) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) r3 = socket$netlink(0x10, 0x3, 0x2) sendmsg$DEVLINK_CMD_RATE_GET(r3, &(0x7f0000000280)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000240)={&(0x7f0000000140)={0xc4, 0x0, 0x800, 0x70bd2c, 0x25dfdbff, {}, [@DEVLINK_ATTR_RATE_NODE_NAME={0xf, 0xa8, @name2}, @DEVLINK_ATTR_RATE_NODE_NAME={0x9d, 0xa8, @random="61bfea00f646684911bce065547facc042b459b38486ef37beaff2e4505e1bc1ed41a5f75a7a3a815daab3ebd1d716c8029773000c4222a2513495e1b63c85345672f019115c5a9a5a9d1f5d1c94c73817739f1289e0a524fa79ab95cc3ac3789bfd5ffb9871c0ec4d92f4057244ca9b4d090b0d70a0a00870c2852de3c7b767fa93cd50f9ee39e33e0955ef94072de1968181076a04991948"}]}, 0xc4}, 0x1, 0x0, 0x0, 0x4000}, 0x40) (async) sendmsg$ETHTOOL_MSG_LINKSTATE_GET(r0, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x34, 0x0, 0x8, 0x70bd26, 0x25dfdbff, {}, [@HEADER={0x20, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_INDEX={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 
'gretap0\x00'}]}]}, 0x34}, 0x1, 0x0, 0x0, 0x4}, 0x54) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) [ 2657.164621][T27071] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:31 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r1, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r1, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r2 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r2, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) [ 2657.415162][T27071] bond1201: entered promiscuous mode [ 2657.421260][T27071] 8021q: adding VLAN 0 to HW filter on device bond1201 [ 2657.627874][T27074] bond1201: (slave bridge1104): making interface the new active one [ 2657.653667][T27074] bridge1104: entered promiscuous mode [ 2657.679530][T27074] bond1201: (slave bridge1104): Enslaving as an active interface with an up link [ 2657.693625][T27079] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.0'. 
19:47:31 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x68150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:31 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r0, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) 19:47:31 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async) r3 = socket$netlink(0x10, 0x3, 0x2) sendmsg$DEVLINK_CMD_RATE_GET(r3, &(0x7f0000000280)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000240)={&(0x7f0000000140)={0xc4, 0x0, 0x800, 0x70bd2c, 0x25dfdbff, {}, [@DEVLINK_ATTR_RATE_NODE_NAME={0xf, 0xa8, @name2}, @DEVLINK_ATTR_RATE_NODE_NAME={0x9d, 0xa8, @random="61bfea00f646684911bce065547facc042b459b38486ef37beaff2e4505e1bc1ed41a5f75a7a3a815daab3ebd1d716c8029773000c4222a2513495e1b63c85345672f019115c5a9a5a9d1f5d1c94c73817739f1289e0a524fa79ab95cc3ac3789bfd5ffb9871c0ec4d92f4057244ca9b4d090b0d70a0a00870c2852de3c7b767fa93cd50f9ee39e33e0955ef94072de1968181076a04991948"}]}, 0xc4}, 0x1, 0x0, 0x0, 0x4000}, 0x40) (async) sendmsg$ETHTOOL_MSG_LINKSTATE_GET(r0, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x34, 0x0, 0x8, 0x70bd26, 0x25dfdbff, {}, [@HEADER={0x20, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_INDEX={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'gretap0\x00'}]}]}, 0x34}, 0x1, 0x0, 0x0, 0x4}, 0x54) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:47:31 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) 
ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) 19:47:31 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r0, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) 19:47:31 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 
0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6fe70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2657.727046][T27082] workqueue: Failed to create a rescuer kthread for wq "bond1240": -EINTR [ 2657.949032][T27097] bond1124: entered promiscuous mode [ 2657.964065][T27097] 8021q: adding VLAN 0 to HW filter on device bond1124 19:47:32 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r0, 0x7}) 19:47:32 executing program 3: sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000600)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000040)={&(0x7f0000000100)=@newtclass={0x4d8, 0x28, 0x200, 0x70bd2b, 0x25dfdbfd, {0x0, 0x0, 0x0, 0x0, {0xfff1, 0x10}, {0xa, 0xfff1}, {0xfff3, 0xffff}}, [@tclass_kind_options=@c_htb={{0x8}, {0x4ac, 0x2, [@TCA_HTB_PARMS={0x30, 0x1, {{0x7, 0x0, 0x40, 0xfff, 0x3, 0x5}, {0x3f, 0x2, 0x9, 0x5, 0x1, 0xa4}, 0x7, 0x4, 0x4, 0x4, 0xb90}}, @TCA_HTB_PARMS={0x30, 0x1, {{0x7, 0x2, 0x8, 0x1, 0x5, 0x9}, {0x7f, 0x2, 0x9, 0x2, 0xfff, 0x400}, 0x66, 0x8802, 0x1b260374, 0x456d, 0x8}}, @TCA_HTB_OFFLOAD={0x4}, @TCA_HTB_OFFLOAD={0x4}, @TCA_HTB_PARMS={0x30, 0x1, {{0xf2, 0x1, 0x4cbb, 0x5f3, 0x100, 0x2f}, {0x8, 0x2, 0x401, 0x28f, 0x3, 0x32f}, 0x0, 0x7, 0x8, 0xfffffff7, 0x1}}, @TCA_HTB_RATE64={0xc, 0x6, 0x6}, @TCA_HTB_RTAB={0x404, 0x4, [0x1ff, 0x4, 0x4, 0x100, 0x5, 0x80000001, 0x7fff, 0x81, 0x1, 0x2, 0x101, 0x40, 0x3, 0x101, 0xd8, 0x1ff, 0x401, 0x0, 0x7fffffff, 0x5, 0x0, 0x1000, 0x4800000, 0x96, 0x1000, 0xffff, 0x30, 0x7, 0xa1, 0xffffffff, 0xfffffffe, 0x9b, 0x8, 0xfffffff7, 0x5, 0x7, 0xffffffff, 0x81, 0x4, 0x7f, 0x14, 0x8, 0xfff, 0xfff, 0xfffffff7, 0x1, 0x7b5, 0x0, 0xff6d, 0xffff, 0x6, 0x401, 0x3, 0x8, 0xb8c3, 0xeaae, 0x7, 0x9, 0x3, 0xfffffff7, 0x5, 0x5, 0x3ff, 0x9, 0x40, 0x6, 0x0, 0x0, 0x7f, 0x7ff, 0x1f, 0x0, 0x8, 0x9, 0x2, 0x80000000, 0xfff, 0x9, 0x3800, 0x4685, 0x400, 0x2, 0x9, 0x28, 0x6, 0x9, 0x400, 0x5, 0x6d9, 0x5, 0xfffffff9, 0x0, 0x1, 0x1, 0xfffffff8, 0x1000, 0x9, 0x8, 0x7fff, 0x5, 0x2, 0x10001, 0xffffffff, 0x7, 0x6, 0xfffffffe, 0x6, 0x7fffffff, 0xe2, 0x4, 0x7f, 0x135, 0xab46, 0x7, 0x7, 0x0, 0x5, 0x10001, 0x8000, 0x9, 0x4, 0x9, 0x1, 0x2, 0x4, 0x5ec, 0x1, 0x5, 0xf91, 0x4, 0x7, 0xb37a, 0x5, 0xb5, 0x148, 0x6, 0xffff, 0x8, 0x5, 0x7fff, 0x5983, 0x3, 0x7, 0xfffffff7, 0x65f, 0x6, 0x80000001, 0x10000, 0x1, 0x80000000, 0x1, 0x2, 0xc267, 0x2, 0x7ff, 0xffffffff, 0x2, 0x8, 0x1ff, 0x36, 0xffffffff, 0x5b, 0x1, 0x4, 0xc4, 0x9, 0x8000, 0x8, 0xa0b, 0x5, 0x9, 0x3, 0x9, 
0x7, 0x0, 0xfffffeff, 0x6, 0x8, 0x20, 0x0, 0x200, 0x9, 0x0, 0x7, 0x5, 0x101, 0x7, 0x9, 0xffff, 0x0, 0x0, 0x9, 0xfff, 0x5, 0x80, 0x7, 0x7f, 0x7, 0x8, 0x8000, 0x136f, 0x849, 0xfff, 0x1, 0x0, 0xde, 0x9, 0x7ff, 0x1, 0x0, 0x400, 0xff3, 0x80000000, 0x7, 0x2, 0x0, 0x10001, 0x683, 0x9, 0x101, 0x0, 0x8000, 0x4f43, 0x4, 0x7ca6, 0x0, 0xb83ce1cc, 0x8, 0x3, 0xffff, 0x81, 0x4, 0x6, 0x9, 0x7ff, 0x4, 0x1, 0x9, 0x8, 0x20, 0x4a5cb832, 0x24, 0x65, 0xfffff800, 0xdfb, 0xffffd4b7, 0xefdc, 0x2, 0xe2, 0x4f, 0x1, 0x8, 0x1e2, 0x9, 0xfffffffe, 0xfffffffb]}]}}]}, 0x4d8}, 0x1, 0x0, 0x0, 0x8040041}, 0x1) r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) ioctl$sock_SIOCETHTOOL(r0, 0x8946, &(0x7f0000000680)={'veth1_macvtap\x00', &(0x7f0000000640)=@ethtool_perm_addr={0x20, 0x7, "79a89962603404"}}) syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c2000000f7e31b827e80080045000058717864010100640101020420880b0000000000110800000086dd080088be00000000100000000100000000008f0000080022eb000000002000000002000000000000000000000008006558000000000000000000"], 0x0) syz_emit_ethernet(0x22, &(0x7f00000000c0)={@local, @empty, @val={@void, {0x8100, 0x3, 0x0, 0x2}}, {@can={0xc, {{0x1, 0x0, 0x1}, 0x1, 0x2, 0x0, 0x0, "07000000cdf04000"}}}}, &(0x7f0000000080)={0x0, 0x1, [0xe5d, 0xb0b, 0x868, 0x91b]}) [ 2658.157462][T27103] bond1124: (slave bridge1054): making interface the new active one [ 2658.168075][T27103] bridge1054: entered promiscuous mode [ 2658.181814][T27103] bond1124: (slave bridge1054): Enslaving as an active interface with an up link 19:47:32 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xb8000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:32 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) [ 2658.341474][T27110] bond1202: entered promiscuous mode [ 2658.368401][T27110] 8021q: adding VLAN 0 to HW filter on device bond1202 19:47:32 executing program 2: 
socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) [ 2658.615481][T27112] bond1202: (slave bridge1105): making interface the new active one [ 2658.644874][T27112] bridge1105: entered promiscuous mode 19:47:32 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) [ 2658.666832][T27112] bond1202: (slave bridge1105): Enslaving as an active interface with an up link [ 2658.704669][T27121] validate_nla: 3 callbacks suppressed [ 2658.704718][T27121] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:47:32 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x69150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:32 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2658.841405][T27121] bond1240: entered promiscuous mode [ 2658.888789][T27121] 8021q: adding VLAN 0 to HW filter on device bond1240 19:47:33 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2659.228735][T27126] bond1240: (slave bridge1169): making interface the new active one [ 2659.257213][T27126] bridge1169: entered promiscuous mode 19:47:33 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r3, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r3, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r3, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r4 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r4, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r4, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r2, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r6, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r7, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r7, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) 19:47:33 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2659.278751][T27126] bond1240: (slave bridge1169): Enslaving as an active interface with an up link 19:47:33 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x70e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 
0x0) 19:47:33 executing program 3: sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000600)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000040)={&(0x7f0000000100)=@newtclass={0x4d8, 0x28, 0x200, 0x70bd2b, 0x25dfdbfd, {0x0, 0x0, 0x0, 0x0, {0xfff1, 0x10}, {0xa, 0xfff1}, {0xfff3, 0xffff}}, [@tclass_kind_options=@c_htb={{0x8}, {0x4ac, 0x2, [@TCA_HTB_PARMS={0x30, 0x1, {{0x7, 0x0, 0x40, 0xfff, 0x3, 0x5}, {0x3f, 0x2, 0x9, 0x5, 0x1, 0xa4}, 0x7, 0x4, 0x4, 0x4, 0xb90}}, @TCA_HTB_PARMS={0x30, 0x1, {{0x7, 0x2, 0x8, 0x1, 0x5, 0x9}, {0x7f, 0x2, 0x9, 0x2, 0xfff, 0x400}, 0x66, 0x8802, 0x1b260374, 0x456d, 0x8}}, @TCA_HTB_OFFLOAD={0x4}, @TCA_HTB_OFFLOAD={0x4}, @TCA_HTB_PARMS={0x30, 0x1, {{0xf2, 0x1, 0x4cbb, 0x5f3, 0x100, 0x2f}, {0x8, 0x2, 0x401, 0x28f, 0x3, 0x32f}, 0x0, 0x7, 0x8, 0xfffffff7, 0x1}}, @TCA_HTB_RATE64={0xc, 0x6, 0x6}, @TCA_HTB_RTAB={0x404, 0x4, [0x1ff, 0x4, 0x4, 0x100, 0x5, 0x80000001, 0x7fff, 0x81, 0x1, 0x2, 0x101, 0x40, 0x3, 0x101, 0xd8, 0x1ff, 0x401, 0x0, 0x7fffffff, 0x5, 0x0, 0x1000, 0x4800000, 0x96, 0x1000, 0xffff, 0x30, 0x7, 0xa1, 0xffffffff, 0xfffffffe, 0x9b, 0x8, 0xfffffff7, 0x5, 0x7, 0xffffffff, 0x81, 0x4, 0x7f, 0x14, 0x8, 0xfff, 0xfff, 0xfffffff7, 0x1, 0x7b5, 0x0, 0xff6d, 0xffff, 0x6, 0x401, 0x3, 0x8, 0xb8c3, 0xeaae, 0x7, 0x9, 0x3, 0xfffffff7, 0x5, 0x5, 0x3ff, 0x9, 0x40, 0x6, 0x0, 0x0, 0x7f, 0x7ff, 0x1f, 0x0, 0x8, 0x9, 0x2, 0x80000000, 0xfff, 0x9, 0x3800, 0x4685, 0x400, 0x2, 0x9, 0x28, 0x6, 0x9, 0x400, 0x5, 0x6d9, 0x5, 0xfffffff9, 0x0, 0x1, 0x1, 0xfffffff8, 0x1000, 0x9, 0x8, 0x7fff, 0x5, 0x2, 0x10001, 0xffffffff, 0x7, 0x6, 0xfffffffe, 0x6, 0x7fffffff, 0xe2, 0x4, 0x7f, 0x135, 0xab46, 0x7, 0x7, 0x0, 0x5, 0x10001, 0x8000, 0x9, 0x4, 0x9, 0x1, 0x2, 0x4, 0x5ec, 0x1, 0x5, 0xf91, 0x4, 0x7, 0xb37a, 0x5, 0xb5, 0x148, 0x6, 0xffff, 0x8, 0x5, 0x7fff, 0x5983, 0x3, 0x7, 0xfffffff7, 0x65f, 0x6, 0x80000001, 0x10000, 0x1, 0x80000000, 0x1, 0x2, 0xc267, 0x2, 0x7ff, 0xffffffff, 0x2, 0x8, 0x1ff, 0x36, 0xffffffff, 0x5b, 0x1, 0x4, 0xc4, 0x9, 0x8000, 0x8, 0xa0b, 0x5, 0x9, 0x3, 0x9, 0x7, 0x0, 0xfffffeff, 0x6, 0x8, 0x20, 0x0, 0x200, 0x9, 0x0, 0x7, 0x5, 0x101, 0x7, 0x9, 0xffff, 0x0, 0x0, 0x9, 0xfff, 0x5, 0x80, 0x7, 0x7f, 0x7, 0x8, 0x8000, 0x136f, 0x849, 0xfff, 0x1, 0x0, 0xde, 0x9, 0x7ff, 0x1, 0x0, 0x400, 0xff3, 0x80000000, 0x7, 0x2, 0x0, 0x10001, 0x683, 0x9, 0x101, 0x0, 0x8000, 0x4f43, 0x4, 0x7ca6, 0x0, 0xb83ce1cc, 0x8, 0x3, 0xffff, 0x81, 0x4, 0x6, 0x9, 0x7ff, 0x4, 0x1, 0x9, 0x8, 0x20, 0x4a5cb832, 0x24, 0x65, 0xfffff800, 0xdfb, 0xffffd4b7, 0xefdc, 0x2, 0xe2, 0x4f, 0x1, 0x8, 0x1e2, 0x9, 0xfffffffe, 0xfffffffb]}]}}]}, 0x4d8}, 0x1, 0x0, 0x0, 0x8040041}, 0x1) r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async, rerun: 64) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) (rerun: 64) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async) ioctl$sock_SIOCETHTOOL(r0, 0x8946, &(0x7f0000000680)={'veth1_macvtap\x00', &(0x7f0000000640)=@ethtool_perm_addr={0x20, 0x7, "79a89962603404"}}) (async, rerun: 64) syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c2000000f7e31b827e80080045000058717864010100640101020420880b0000000000110800000086dd080088be00000000100000000100000000008f0000080022eb000000002000000002000000000000000000000008006558000000000000000000"], 0x0) (rerun: 64) syz_emit_ethernet(0x22, &(0x7f00000000c0)={@local, @empty, @val={@void, {0x8100, 0x3, 
0x0, 0x2}}, {@can={0xc, {{0x1, 0x0, 0x1}, 0x1, 0x2, 0x0, 0x0, "07000000cdf04000"}}}}, &(0x7f0000000080)={0x0, 0x1, [0xe5d, 0xb0b, 0x868, 0x91b]}) [ 2659.354501][T27137] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:33 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2659.399997][T27137] workqueue: Failed to create a rescuer kthread for wq "bond1125": -EINTR [ 2659.650599][T27151] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:33 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xbf4d0546, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:33 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:33 executing program 3: sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000600)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000040)={&(0x7f0000000100)=@newtclass={0x4d8, 0x28, 0x200, 0x70bd2b, 0x25dfdbfd, {0x0, 0x0, 0x0, 0x0, {0xfff1, 0x10}, {0xa, 0xfff1}, {0xfff3, 0xffff}}, [@tclass_kind_options=@c_htb={{0x8}, {0x4ac, 0x2, [@TCA_HTB_PARMS={0x30, 0x1, {{0x7, 0x0, 0x40, 0xfff, 0x3, 0x5}, {0x3f, 0x2, 0x9, 0x5, 0x1, 0xa4}, 0x7, 0x4, 0x4, 0x4, 0xb90}}, @TCA_HTB_PARMS={0x30, 0x1, {{0x7, 0x2, 0x8, 0x1, 0x5, 0x9}, {0x7f, 0x2, 0x9, 0x2, 0xfff, 0x400}, 0x66, 0x8802, 0x1b260374, 0x456d, 0x8}}, @TCA_HTB_OFFLOAD={0x4}, @TCA_HTB_OFFLOAD={0x4}, @TCA_HTB_PARMS={0x30, 0x1, {{0xf2, 0x1, 0x4cbb, 0x5f3, 0x100, 0x2f}, {0x8, 0x2, 0x401, 0x28f, 0x3, 0x32f}, 0x0, 0x7, 0x8, 0xfffffff7, 0x1}}, @TCA_HTB_RATE64={0xc, 0x6, 0x6}, @TCA_HTB_RTAB={0x404, 0x4, [0x1ff, 0x4, 0x4, 0x100, 0x5, 0x80000001, 0x7fff, 0x81, 0x1, 0x2, 0x101, 0x40, 0x3, 0x101, 0xd8, 0x1ff, 0x401, 0x0, 0x7fffffff, 0x5, 0x0, 0x1000, 0x4800000, 0x96, 0x1000, 0xffff, 0x30, 0x7, 0xa1, 0xffffffff, 0xfffffffe, 0x9b, 0x8, 0xfffffff7, 0x5, 0x7, 0xffffffff, 0x81, 0x4, 0x7f, 0x14, 0x8, 0xfff, 0xfff, 0xfffffff7, 0x1, 0x7b5, 0x0, 0xff6d, 0xffff, 0x6, 0x401, 0x3, 0x8, 0xb8c3, 0xeaae, 0x7, 0x9, 0x3, 0xfffffff7, 0x5, 0x5, 0x3ff, 0x9, 0x40, 0x6, 0x0, 0x0, 0x7f, 0x7ff, 0x1f, 0x0, 0x8, 0x9, 0x2, 0x80000000, 0xfff, 0x9, 0x3800, 0x4685, 0x400, 0x2, 0x9, 0x28, 0x6, 0x9, 0x400, 0x5, 0x6d9, 0x5, 0xfffffff9, 0x0, 0x1, 0x1, 0xfffffff8, 0x1000, 0x9, 0x8, 
0x7fff, 0x5, 0x2, 0x10001, 0xffffffff, 0x7, 0x6, 0xfffffffe, 0x6, 0x7fffffff, 0xe2, 0x4, 0x7f, 0x135, 0xab46, 0x7, 0x7, 0x0, 0x5, 0x10001, 0x8000, 0x9, 0x4, 0x9, 0x1, 0x2, 0x4, 0x5ec, 0x1, 0x5, 0xf91, 0x4, 0x7, 0xb37a, 0x5, 0xb5, 0x148, 0x6, 0xffff, 0x8, 0x5, 0x7fff, 0x5983, 0x3, 0x7, 0xfffffff7, 0x65f, 0x6, 0x80000001, 0x10000, 0x1, 0x80000000, 0x1, 0x2, 0xc267, 0x2, 0x7ff, 0xffffffff, 0x2, 0x8, 0x1ff, 0x36, 0xffffffff, 0x5b, 0x1, 0x4, 0xc4, 0x9, 0x8000, 0x8, 0xa0b, 0x5, 0x9, 0x3, 0x9, 0x7, 0x0, 0xfffffeff, 0x6, 0x8, 0x20, 0x0, 0x200, 0x9, 0x0, 0x7, 0x5, 0x101, 0x7, 0x9, 0xffff, 0x0, 0x0, 0x9, 0xfff, 0x5, 0x80, 0x7, 0x7f, 0x7, 0x8, 0x8000, 0x136f, 0x849, 0xfff, 0x1, 0x0, 0xde, 0x9, 0x7ff, 0x1, 0x0, 0x400, 0xff3, 0x80000000, 0x7, 0x2, 0x0, 0x10001, 0x683, 0x9, 0x101, 0x0, 0x8000, 0x4f43, 0x4, 0x7ca6, 0x0, 0xb83ce1cc, 0x8, 0x3, 0xffff, 0x81, 0x4, 0x6, 0x9, 0x7ff, 0x4, 0x1, 0x9, 0x8, 0x20, 0x4a5cb832, 0x24, 0x65, 0xfffff800, 0xdfb, 0xffffd4b7, 0xefdc, 0x2, 0xe2, 0x4f, 0x1, 0x8, 0x1e2, 0x9, 0xfffffffe, 0xfffffffb]}]}}]}, 0x4d8}, 0x1, 0x0, 0x0, 0x8040041}, 0x1) r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r0, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r1, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r2}, @void}}}, 0x1c}}, 0x0) (async) ioctl$sock_SIOCETHTOOL(r0, 0x8946, &(0x7f0000000680)={'veth1_macvtap\x00', &(0x7f0000000640)=@ethtool_perm_addr={0x20, 0x7, "79a89962603404"}}) syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c2000000f7e31b827e80080045000058717864010100640101020420880b0000000000110800000086dd080088be00000000100000000100000000008f0000080022eb000000002000000002000000000000000000000008006558000000000000000000"], 0x0) (async) syz_emit_ethernet(0x22, &(0x7f00000000c0)={@local, @empty, @val={@void, {0x8100, 0x3, 0x0, 0x2}}, {@can={0xc, {{0x1, 0x0, 0x1}, 0x1, 0x2, 0x0, 0x0, "07000000cdf04000"}}}}, &(0x7f0000000080)={0x0, 0x1, [0xe5d, 0xb0b, 0x868, 0x91b]}) 19:47:33 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6a150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2659.825617][T27151] bond1203: entered promiscuous mode [ 2659.831426][T27151] 8021q: adding VLAN 0 to HW filter on device bond1203 [ 2659.856150][T27169] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:33 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:33 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r3, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r3, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r3, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r4 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r4, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r4, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r2, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r6, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r7, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) [ 2659.967173][T27169] bond1241: entered promiscuous mode [ 2659.974168][T27169] 8021q: adding VLAN 0 to HW filter on device bond1241 19:47:34 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2660.300787][T27175] bond1241: (slave bridge1170): making interface the new active one [ 2660.314747][T27175] bridge1170: entered promiscuous mode 19:47:34 executing program 2: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2660.346606][T27175] bond1241: (slave bridge1170): Enslaving as an active interface with an up link 19:47:34 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x71e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:34 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c21000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb000000002000007493049fd0350cb03b5ffc4c61a88cc60002000000"], 0x0) 19:47:34 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2660.621981][T27191] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:34 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c21000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb000000002000007493049fd0350cb03b5ffc4c61a88cc60002000000"], 0x0) [ 2660.788399][T27191] bond1125: entered promiscuous mode [ 2660.804954][T27191] 8021q: adding VLAN 0 to HW filter on device bond1125 [ 2660.818583][T27187] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:34 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xc3ffffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:34 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000800)=ANY=[@ANYBLOB="0180c21000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb000000002000007493049fd0350cb03b5ffc4c61a88cc60002000000"], 0x0) 19:47:34 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2660.855610][T27187] workqueue: Failed to create a rescuer kthread for wq "bond1204": -EINTR [ 2661.049351][T27196] __nla_validate_parse: 7 callbacks suppressed [ 2661.049378][T27196] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.0'. 
19:47:35 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6b150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:35 executing program 3: accept4$phonet_pipe(0xffffffffffffffff, &(0x7f0000000100), &(0x7f0000000140)=0x10, 0x180000) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) syz_emit_ethernet(0x84, &(0x7f0000000000)={@random="e6b0902b713e", @empty, @val={@val={0x9100, 0x5}, {0x8100, 0x7, 0x0, 0x4}}, {@llc={0x4, {@llc={0x4, 0x6, "e8", "0cb1fd5bc4266a33e140fd9d58b09c38fded2769827bdf4951bc2feaf0eb4a553ce105959a7b34b572e87aa78fd7f776036fdafd6f48ce40b3cdb8e1422489beb10625f289abdeea6e973d3a7ce1c6fcbc0ca3ec92d3b5faa9a459c7126b61dc6d3e4c17b3cb6a7a973cb6"}}}}}, &(0x7f00000000c0)={0x0, 0x3, [0xc0, 0x28, 0xe2b, 0x65]}) socket$nl_generic(0x10, 0x3, 0x10) 19:47:35 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:35 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, 
&(0x7f00000002c0)=0x14) getsockname$packet(r5, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) [ 2661.162507][T27204] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:47:35 executing program 3: accept4$phonet_pipe(0xffffffffffffffff, &(0x7f0000000100), &(0x7f0000000140)=0x10, 0x180000) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) syz_emit_ethernet(0x84, &(0x7f0000000000)={@random="e6b0902b713e", @empty, @val={@val={0x9100, 0x5}, {0x8100, 0x7, 0x0, 0x4}}, {@llc={0x4, {@llc={0x4, 0x6, "e8", "0cb1fd5bc4266a33e140fd9d58b09c38fded2769827bdf4951bc2feaf0eb4a553ce105959a7b34b572e87aa78fd7f776036fdafd6f48ce40b3cdb8e1422489beb10625f289abdeea6e973d3a7ce1c6fcbc0ca3ec92d3b5faa9a459c7126b61dc6d3e4c17b3cb6a7a973cb6"}}}}}, &(0x7f00000000c0)={0x0, 0x3, [0xc0, 0x28, 0xe2b, 0x65]}) (async) socket$nl_generic(0x10, 0x3, 0x10) [ 2661.339912][T27204] bond1242: entered promiscuous mode [ 2661.357942][T27204] 8021q: adding VLAN 0 to HW filter on device bond1242 [ 2661.553629][T27209] bond1242: (slave bridge1171): making interface the new active one [ 2661.574263][T27209] bridge1171: entered promiscuous mode [ 2661.596089][T27209] bond1242: (slave bridge1171): Enslaving as an active interface with an up link 19:47:35 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x72e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:35 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:35 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) 
sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) 19:47:35 executing program 3: accept4$phonet_pipe(0xffffffffffffffff, &(0x7f0000000100), &(0x7f0000000140)=0x10, 0x180000) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) syz_emit_ethernet(0x84, &(0x7f0000000000)={@random="e6b0902b713e", @empty, @val={@val={0x9100, 0x5}, {0x8100, 0x7, 0x0, 0x4}}, {@llc={0x4, {@llc={0x4, 0x6, "e8", "0cb1fd5bc4266a33e140fd9d58b09c38fded2769827bdf4951bc2feaf0eb4a553ce105959a7b34b572e87aa78fd7f776036fdafd6f48ce40b3cdb8e1422489beb10625f289abdeea6e973d3a7ce1c6fcbc0ca3ec92d3b5faa9a459c7126b61dc6d3e4c17b3cb6a7a973cb6"}}}}}, &(0x7f00000000c0)={0x0, 0x3, [0xc0, 0x28, 0xe2b, 0x65]}) socket$nl_generic(0x10, 0x3, 0x10) [ 2661.762115][T27219] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2661.848204][T27219] bond1126: entered promiscuous mode [ 2661.854638][T27219] 8021q: adding VLAN 0 to HW filter on device bond1126 [ 2661.965609][T27221] bond1126: (slave bridge1055): making interface the new active one [ 2661.974654][T27221] bridge1055: entered promiscuous mode [ 2662.000084][T27221] bond1126: (slave bridge1055): Enslaving as an active interface with an up link 19:47:36 executing program 3: r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000440), 0x0, 0x0) ioctl$TUNSETIFINDEX(r0, 0x400454da, &(0x7f00000000c0)) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:47:36 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:36 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xe4ffffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2662.018486][T27234] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2662.134501][T27234] bond1204: entered promiscuous mode [ 2662.146909][T27234] 8021q: adding VLAN 0 to HW filter on device bond1204 [ 2662.253562][T27235] bond1204: (slave bridge1106): making interface the new active one [ 2662.261635][T27235] bridge1106: entered promiscuous mode [ 2662.272157][T27235] bond1204: (slave bridge1106): Enslaving as an active interface with an up link 19:47:36 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) socket(0x1, 0x803, 0x0) 19:47:36 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6c000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:36 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2662.340940][T27245] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:36 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) 19:47:36 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2662.476259][T27245] bond1243: entered promiscuous mode [ 2662.496507][T27245] 8021q: adding VLAN 0 to HW filter on device bond1243 19:47:36 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x73020000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:36 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) 19:47:36 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, 0x0, 0x0) [ 2662.725476][T27247] bond1243: (slave bridge1172): making interface the new active one [ 2662.734154][T27247] bridge1172: entered promiscuous mode [ 2662.749031][T27247] bond1243: (slave bridge1172): Enslaving as an active interface with an up link 19:47:36 executing program 3: r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000440), 0x0, 0x0) ioctl$TUNSETIFINDEX(r0, 0x400454da, 
&(0x7f00000000c0)) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) 19:47:36 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, 0x0, 0x0) 19:47:36 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) socket(0x1, 0x803, 0x0) [ 2662.959233][T27257] bond1127: entered promiscuous mode [ 2663.014124][T27257] 8021q: adding VLAN 0 to HW filter on device bond1127 [ 2663.195521][T27258] bond1127: (slave bridge1056): making interface the new active one [ 2663.226637][T27258] bridge1056: entered promiscuous mode [ 2663.248924][T27258] bond1127: (slave bridge1056): Enslaving as an active interface with an up link 19:47:37 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xe68c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:37 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, 0x0, 0x0) 19:47:37 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) 
getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6c150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:37 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) 19:47:37 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r1, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r1, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r2 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r2, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) [ 2663.280414][T27266] workqueue: Failed to create a rescuer kthread for wq "bond1205": -EINTR [ 2663.697372][T27280] bond1244: entered promiscuous mode [ 2663.714945][T27280] 8021q: adding VLAN 0 to HW filter on device bond1244 [ 2663.779080][T27283] bond1244: (slave bridge1173): making interface the new active one [ 2663.787749][T27283] bridge1173: entered promiscuous mode 
19:47:37 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r1, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r1, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r2 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r2, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r2, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) [ 2663.825252][T27283] bond1244: (slave bridge1173): Enslaving as an active interface with an up link [ 2663.864641][T27294] validate_nla: 3 callbacks suppressed [ 2663.864664][T27294] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:38 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x73e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:38 executing program 2: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r2, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r2, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r2, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) 
sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r3, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) 19:47:38 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r1, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r1, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r1, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r2 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r2, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r0], 0x4}}, 0x0) [ 2664.132220][T27294] bond1128: entered promiscuous mode [ 2664.188915][T27294] 8021q: adding VLAN 0 to HW filter on device bond1128 [ 2664.359600][T27295] bond1128: (slave bridge1057): making interface the new active one [ 2664.385811][T27295] bridge1057: entered promiscuous mode [ 2664.415776][T27295] bond1128: (slave bridge1057): Enslaving as an active interface with an up link 19:47:38 executing program 3: r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000440), 0x0, 0x0) ioctl$TUNSETIFINDEX(r0, 0x400454da, &(0x7f00000000c0)) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) openat$tun(0xffffffffffffff9c, &(0x7f0000000440), 0x0, 0x0) (async) ioctl$TUNSETIFINDEX(r0, 0x400454da, &(0x7f00000000c0)) (async) syz_emit_ethernet(0x66, &(0x7f0000000800)={@link_local, @random="8a061b827e90", @void, {@ipv4={0x800, @gre={{0x5, 0x4, 0x0, 0x0, 0x58, 0x0, 0x0, 0x0, 0x2f, 0x0, @rand_addr=0x64010100, @rand_addr=0x64010102}}}}}, 0x0) (async) 19:47:38 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) 
ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) 19:47:38 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r0, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) 19:47:38 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, 
@broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xe78c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:38 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r0, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) [ 2664.638689][T27303] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2664.705139][T27303] workqueue: Failed to create a rescuer kthread for wq "bond1205": -EINTR [ 2664.967630][T27313] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:47:39 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6d150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:39 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r0, 0x7}) 19:47:39 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) [ 2665.048343][T27313] workqueue: Failed to create a rescuer kthread for wq "bond1245": -EINTR [ 2665.239719][T27325] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:39 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x74000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:39 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r0, 0xc028660f, &(0x7f0000001300)={0x2880008}) [ 2665.377670][T27325] bond1129: entered promiscuous mode [ 2665.434030][T27325] 8021q: adding VLAN 0 to HW filter on device bond1129 [ 2665.477685][T27323] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. 19:47:39 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r0, 0x660c) [ 2665.544331][T27327] bond1129: (slave bridge1058): making interface the new active one [ 2665.552822][T27327] bridge1058: entered promiscuous mode [ 2665.592120][T27327] bond1129: (slave bridge1058): Enslaving as an active interface with an up link [ 2665.631515][T27330] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. 
19:47:39 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xe88c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2665.649285][T27332] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2665.684477][T27345] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2665.763851][T27345] bond1205: entered promiscuous mode [ 2665.769775][T27345] 8021q: adding VLAN 0 to HW filter on device bond1205 [ 2665.960391][T27346] bond1205: (slave bridge1107): making interface the new active one [ 2665.996651][T27346] bridge1107: entered promiscuous mode [ 2666.028143][T27346] bond1205: (slave bridge1107): Enslaving as an active interface with an up link [ 2666.104452][T27351] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2666.191940][T27351] bond1245: entered promiscuous mode [ 2666.198806][T27351] 8021q: adding VLAN 0 to HW filter on device bond1245 [ 2666.258518][T27355] bond1245: (slave bridge1174): making interface the new active one [ 2666.267053][T27355] bridge1174: entered promiscuous mode [ 2666.280988][T27355] bond1245: (slave bridge1174): Enslaving as an active interface with an up link [ 2666.290656][T27360] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2666.369346][T27360] bond1130: entered promiscuous mode [ 2666.375416][T27360] 8021q: adding VLAN 0 to HW filter on device bond1130 [ 2666.425146][T27361] bond1130: (slave bridge1059): making interface the new active one [ 2666.434630][T27361] bridge1059: entered promiscuous mode [ 2666.450412][T27361] bond1130: (slave bridge1059): Enslaving as an active interface with an up link 19:47:40 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb00000000200000000200000000000000000000000800655800000000e835617a534db33489362395d7be1a7f7749e4b03edd69a99173fc32d5d5be6da7633dd09714f1c0a3cb77100b8d94493fe75d272d353ff4608e20b25a04725e3944b4e7"], 0x0) 19:47:40 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:40 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000380), 0x101bf) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r4, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r4, 0xc028660f, &(0x7f0000001300)={0x2880008}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x1000012, 0x28011, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_MOVE_EXT(0xffffffffffffffff, 0xc028660f, &(0x7f0000000080)={0x0, r4, 0x7}) socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000540)=ANY=[@ANYBLOB="46040000", @ANYRES16=r5, @ANYBLOB="ff830500000700ffffff", @ANYRESOCT=r1], 0x4}}, 0x0) sendmsg$BATADV_CMD_GET_MCAST_FLAGS(0xffffffffffffffff, &(0x7f0000003380)={&(0x7f00000032c0)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000003340)={&(0x7f0000003300)={0x14, r5, 0x10, 0x70bd26, 0x25dfdbff}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4046) sendmsg$nl_route(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000300)={&(0x7f00000000c0)=ANY=[@ANYBLOB="4531b494754800"/20, @ANYRES32=0x0], 0x18}}, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) r7 = socket(0x1, 0x803, 0x0) getsockname$packet(r7, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) getsockname$packet(r7, &(0x7f0000000440)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000500)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000003c0)=ANY=[@ANYBLOB="4800000010000507000002008f1000000000000054048f66e7fbdd8d0686e0117f7bad51c391628ab668bbd215798611e8277e61fb888c8ad736fea8885e16e2f9", @ANYRES32=r8, @ANYBLOB="0000000000000000280012000c00010076657468"], 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="7400000024000705000400"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100716671"], 0x74}}, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='4\x00\x00\x00(\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000ad9d072a146b00e89ff52295aa0d70677b3e6102f70000000000000000000a02"], 0x34}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000003280)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000003240)={&(0x7f0000003200)=@ipv4_getroute={0x1c, 0x1a, 0x2, 0x70bd29, 0x25dfdbfc, {0x2, 0x14, 0x0, 0x9, 0xfc, 0x2, 0xfd, 0x6, 0x3300}, ["", ""]}, 0x1c}, 0x1, 0x0, 0x0, 0xfd69c66ee75b6ee8}, 0x0) getsockname$packet(r3, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) 19:47:40 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6e150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:40 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x74e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:40 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xe98c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:40 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) 
ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) 19:47:40 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb00000000200000000200000000000000000000000800655800000000e835617a534db33489362395d7be1a7f7749e4b03edd69a99173fc32d5d5be6da7633dd09714f1c0a3cb77100b8d94493fe75d272d353ff4608e20b25a04725e3944b4e7"], 0x0) [ 2666.846702][T27367] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:40 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000000)=ANY=[@ANYBLOB="0180c20000008a061b827e9008004500005800000000002f907864010100640101020420880b0000000000000800000086dd080088be00000000100000000100000000000000080022eb00000000200000000200000000000000000000000800655800000000e835617a534db33489362395d7be1a7f7749e4b03edd69a99173fc32d5d5be6da7633dd09714f1c0a3cb77100b8d94493fe75d272d353ff4608e20b25a04725e3944b4e7"], 0x0) 19:47:40 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) 19:47:41 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000040)=ANY=[@ANYBLOB="ffffffffffff8a061b827e9008004500005800400000002f907864010100640101020420880b0000000000000800000086dd080088be00000004100000000100000000000000080022eb0000000020000400020000000000000000000000080065580000000032c68e3f0b330c9be18e2e85a1ffa8a65aa083cb95574492c425112f3dc12a1b2f8bb0d666a293687bfe89bc370abadc371bec56d39897c7427caed7eee382076ca69c998b1fad42dd9ae9b1b34a0ed525f1c0c4827bb658132eddbee0905be516437b2d8ca7ce9c76aabed1fbafc3b09e823b9df7948dbf29cdba43ca149eace20eb09300ddd32d623f051d70da9067fd61ab9c97093b4552e7b7b155aba02783a03855a2c5307ec0bbf6b3e9590b4e3f3031b645d087394833167a2bedf582f9400146a6ea3117f8261a668710984e19eb4f483f94b7c4cde4b16dc825acd580fb466d939290fc7437a8ff18fb4932a887b4bbbd991863d9786cb4c3423e8040a299b57a84ce42d46a139fc65451bf37fcc1924e8a937aecf0c8aa760032be71ce0b7ca572c3126394d79e962455c12f78dfa9632ff113243934303e71a5051beb13cd6dffba2c865fe0407b979a0a2a"], 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000200)={@void, @val={0x3, 0x4, 0x0, 0x7fff, 0x1, 0xfff}, @eth={@multicast, @dev={'\xaa\xaa\xaa\xaa\xaa', 0x44}, @void, {@llc={0x4, {@snap={0xab, 0x0, "2cd2", "1be30f", 0x6, "760568a7c9489a76d28c415472a503ec14447aa519834976aa6a90ca718177a41d372192a6be87aa18a2ae24c654d65afef8181817e52ef0095375f5b318621bf7046d9aca554b993b8425bef37f65f4c64921b4062c2bdc039e0f00168fa9ef779b402c54c751099171c5be7f8b1fb190d6fd68d40f5b71b0d887c1ef14b68662c6d284029896a00cc3109b9949e035bee9dc6ebe70adb8980dca4ef05d4e899d9c82fa9fd65e4dee204bae"}}}}}}, 0xcd) syz_emit_ethernet(0xa4, &(0x7f0000000300)={@broadcast, @random='-j<3#d', @val={@void, {0x8100, 0x4, 0x1, 0x4}}, {@mpls_uc={0x8847, {[{0x8000}, {0x6}, {0x76}, {0x7}, {0x4, 0x0, 0x1}, {0x8, 0x0, 0x1}], @generic="e61db26f84d5e131ced0a7580e8e44c539edc7e56c36bf548f27371d14f5bf09fd8dc7887d3f8bc3b581f79601535d69cce6a15fff161d36b89b7099c99d198b10414c25de300d33b096aa0c1aa8ee4e5f41572f8b058310cfaa351d8572444dee6c6f18825d0c99766ffad06bb42ca2f30dde78fe3496700ffd"}}}}, 
0x0) bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) [ 2666.992262][T27367] bond1206: entered promiscuous mode [ 2667.002322][T27367] 8021q: adding VLAN 0 to HW filter on device bond1206 [ 2667.017224][T27371] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2667.121481][T27371] bond1246: entered promiscuous mode [ 2667.128528][T27371] 8021q: adding VLAN 0 to HW filter on device bond1246 19:47:41 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000000), 0xffffff6a) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2667.165295][T27374] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:41 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000040)=ANY=[@ANYBLOB="ffffffffffff8a061b827e9008004500005800400000002f907864010100640101020420880b0000000000000800000086dd080088be00000004100000000100000000000000080022eb0000000020000400020000000000000000000000080065580000000032c68e3f0b330c9be18e2e85a1ffa8a65aa083cb95574492c425112f3dc12a1b2f8bb0d666a293687bfe89bc370abadc371bec56d39897c7427caed7eee382076ca69c998b1fad42dd9ae9b1b34a0ed525f1c0c4827bb658132eddbee0905be516437b2d8ca7ce9c76aabed1fbafc3b09e823b9df7948dbf29cdba43ca149eace20eb09300ddd32d623f051d70da9067fd61ab9c97093b4552e7b7b155aba02783a03855a2c5307ec0bbf6b3e9590b4e3f3031b645d087394833167a2bedf582f9400146a6ea3117f8261a668710984e19eb4f483f94b7c4cde4b16dc825acd580fb466d939290fc7437a8ff18fb4932a887b4bbbd991863d9786cb4c3423e8040a299b57a84ce42d46a139fc65451bf37fcc1924e8a937aecf0c8aa760032be71ce0b7ca572c3126394d79e962455c12f78dfa9632ff113243934303e71a5051beb13cd6dffba2c865fe0407b979a0a2a"], 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000200)={@void, @val={0x3, 0x4, 0x0, 0x7fff, 0x1, 0xfff}, @eth={@multicast, @dev={'\xaa\xaa\xaa\xaa\xaa', 0x44}, @void, {@llc={0x4, {@snap={0xab, 0x0, "2cd2", "1be30f", 0x6, "760568a7c9489a76d28c415472a503ec14447aa519834976aa6a90ca718177a41d372192a6be87aa18a2ae24c654d65afef8181817e52ef0095375f5b318621bf7046d9aca554b993b8425bef37f65f4c64921b4062c2bdc039e0f00168fa9ef779b402c54c751099171c5be7f8b1fb190d6fd68d40f5b71b0d887c1ef14b68662c6d284029896a00cc3109b9949e035bee9dc6ebe70adb8980dca4ef05d4e899d9c82fa9fd65e4dee204bae"}}}}}}, 0xcd) syz_emit_ethernet(0xa4, &(0x7f0000000300)={@broadcast, @random='-j<3#d', @val={@void, {0x8100, 0x4, 0x1, 0x4}}, {@mpls_uc={0x8847, {[{0x8000}, {0x6}, {0x76}, {0x7}, {0x4, 0x0, 0x1}, {0x8, 0x0, 0x1}], @generic="e61db26f84d5e131ced0a7580e8e44c539edc7e56c36bf548f27371d14f5bf09fd8dc7887d3f8bc3b581f79601535d69cce6a15fff161d36b89b7099c99d198b10414c25de300d33b096aa0c1aa8ee4e5f41572f8b058310cfaa351d8572444dee6c6f18825d0c99766ffad06bb42ca2f30dde78fe3496700ffd"}}}}, 0x0) bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) syz_emit_ethernet(0x66, 
&(0x7f0000000040)=ANY=[@ANYBLOB="ffffffffffff8a061b827e9008004500005800400000002f907864010100640101020420880b0000000000000800000086dd080088be00000004100000000100000000000000080022eb0000000020000400020000000000000000000000080065580000000032c68e3f0b330c9be18e2e85a1ffa8a65aa083cb95574492c425112f3dc12a1b2f8bb0d666a293687bfe89bc370abadc371bec56d39897c7427caed7eee382076ca69c998b1fad42dd9ae9b1b34a0ed525f1c0c4827bb658132eddbee0905be516437b2d8ca7ce9c76aabed1fbafc3b09e823b9df7948dbf29cdba43ca149eace20eb09300ddd32d623f051d70da9067fd61ab9c97093b4552e7b7b155aba02783a03855a2c5307ec0bbf6b3e9590b4e3f3031b645d087394833167a2bedf582f9400146a6ea3117f8261a668710984e19eb4f483f94b7c4cde4b16dc825acd580fb466d939290fc7437a8ff18fb4932a887b4bbbd991863d9786cb4c3423e8040a299b57a84ce42d46a139fc65451bf37fcc1924e8a937aecf0c8aa760032be71ce0b7ca572c3126394d79e962455c12f78dfa9632ff113243934303e71a5051beb13cd6dffba2c865fe0407b979a0a2a"], 0x0) (async) write$tun(0xffffffffffffffff, &(0x7f0000000200)={@void, @val={0x3, 0x4, 0x0, 0x7fff, 0x1, 0xfff}, @eth={@multicast, @dev={'\xaa\xaa\xaa\xaa\xaa', 0x44}, @void, {@llc={0x4, {@snap={0xab, 0x0, "2cd2", "1be30f", 0x6, "760568a7c9489a76d28c415472a503ec14447aa519834976aa6a90ca718177a41d372192a6be87aa18a2ae24c654d65afef8181817e52ef0095375f5b318621bf7046d9aca554b993b8425bef37f65f4c64921b4062c2bdc039e0f00168fa9ef779b402c54c751099171c5be7f8b1fb190d6fd68d40f5b71b0d887c1ef14b68662c6d284029896a00cc3109b9949e035bee9dc6ebe70adb8980dca4ef05d4e899d9c82fa9fd65e4dee204bae"}}}}}}, 0xcd) (async) syz_emit_ethernet(0xa4, &(0x7f0000000300)={@broadcast, @random='-j<3#d', @val={@void, {0x8100, 0x4, 0x1, 0x4}}, {@mpls_uc={0x8847, {[{0x8000}, {0x6}, {0x76}, {0x7}, {0x4, 0x0, 0x1}, {0x8, 0x0, 0x1}], @generic="e61db26f84d5e131ced0a7580e8e44c539edc7e56c36bf548f27371d14f5bf09fd8dc7887d3f8bc3b581f79601535d69cce6a15fff161d36b89b7099c99d198b10414c25de300d33b096aa0c1aa8ee4e5f41572f8b058310cfaa351d8572444dee6c6f18825d0c99766ffad06bb42ca2f30dde78fe3496700ffd"}}}}, 0x0) (async) bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) (async) [ 2667.281225][T27374] bond1131: entered promiscuous mode [ 2667.287179][T27374] 8021q: adding VLAN 0 to HW filter on device bond1131 19:47:41 executing program 3: syz_emit_ethernet(0x66, &(0x7f0000000040)=ANY=[@ANYBLOB="ffffffffffff8a061b827e9008004500005800400000002f907864010100640101020420880b0000000000000800000086dd080088be00000004100000000100000000000000080022eb0000000020000400020000000000000000000000080065580000000032c68e3f0b330c9be18e2e85a1ffa8a65aa083cb95574492c425112f3dc12a1b2f8bb0d666a293687bfe89bc370abadc371bec56d39897c7427caed7eee382076ca69c998b1fad42dd9ae9b1b34a0ed525f1c0c4827bb658132eddbee0905be516437b2d8ca7ce9c76aabed1fbafc3b09e823b9df7948dbf29cdba43ca149eace20eb09300ddd32d623f051d70da9067fd61ab9c97093b4552e7b7b155aba02783a03855a2c5307ec0bbf6b3e9590b4e3f3031b645d087394833167a2bedf582f9400146a6ea3117f8261a668710984e19eb4f483f94b7c4cde4b16dc825acd580fb466d939290fc7437a8ff18fb4932a887b4bbbd991863d9786cb4c3423e8040a299b57a84ce42d46a139fc65451bf37fcc1924e8a937aecf0c8aa760032be71ce0b7ca572c3126394d79e962455c12f78dfa9632ff113243934303e71a5051beb13cd6dffba2c865fe0407b979a0a2a"], 0x0) write$tun(0xffffffffffffffff, &(0x7f0000000200)={@void, @val={0x3, 0x4, 0x0, 0x7fff, 0x1, 0xfff}, @eth={@multicast, @dev={'\xaa\xaa\xaa\xaa\xaa', 0x44}, @void, {@llc={0x4, {@snap={0xab, 0x0, "2cd2", "1be30f", 0x6, 
"760568a7c9489a76d28c415472a503ec14447aa519834976aa6a90ca718177a41d372192a6be87aa18a2ae24c654d65afef8181817e52ef0095375f5b318621bf7046d9aca554b993b8425bef37f65f4c64921b4062c2bdc039e0f00168fa9ef779b402c54c751099171c5be7f8b1fb190d6fd68d40f5b71b0d887c1ef14b68662c6d284029896a00cc3109b9949e035bee9dc6ebe70adb8980dca4ef05d4e899d9c82fa9fd65e4dee204bae"}}}}}}, 0xcd) syz_emit_ethernet(0xa4, &(0x7f0000000300)={@broadcast, @random='-j<3#d', @val={@void, {0x8100, 0x4, 0x1, 0x4}}, {@mpls_uc={0x8847, {[{0x8000}, {0x6}, {0x76}, {0x7}, {0x4, 0x0, 0x1}, {0x8, 0x0, 0x1}], @generic="e61db26f84d5e131ced0a7580e8e44c539edc7e56c36bf548f27371d14f5bf09fd8dc7887d3f8bc3b581f79601535d69cce6a15fff161d36b89b7099c99d198b10414c25de300d33b096aa0c1aa8ee4e5f41572f8b058310cfaa351d8572444dee6c6f18825d0c99766ffad06bb42ca2f30dde78fe3496700ffd"}}}}, 0x0) bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) syz_emit_ethernet(0x66, &(0x7f0000000040)=ANY=[@ANYBLOB="ffffffffffff8a061b827e9008004500005800400000002f907864010100640101020420880b0000000000000800000086dd080088be00000004100000000100000000000000080022eb0000000020000400020000000000000000000000080065580000000032c68e3f0b330c9be18e2e85a1ffa8a65aa083cb95574492c425112f3dc12a1b2f8bb0d666a293687bfe89bc370abadc371bec56d39897c7427caed7eee382076ca69c998b1fad42dd9ae9b1b34a0ed525f1c0c4827bb658132eddbee0905be516437b2d8ca7ce9c76aabed1fbafc3b09e823b9df7948dbf29cdba43ca149eace20eb09300ddd32d623f051d70da9067fd61ab9c97093b4552e7b7b155aba02783a03855a2c5307ec0bbf6b3e9590b4e3f3031b645d087394833167a2bedf582f9400146a6ea3117f8261a668710984e19eb4f483f94b7c4cde4b16dc825acd580fb466d939290fc7437a8ff18fb4932a887b4bbbd991863d9786cb4c3423e8040a299b57a84ce42d46a139fc65451bf37fcc1924e8a937aecf0c8aa760032be71ce0b7ca572c3126394d79e962455c12f78dfa9632ff113243934303e71a5051beb13cd6dffba2c865fe0407b979a0a2a"], 0x0) (async) write$tun(0xffffffffffffffff, &(0x7f0000000200)={@void, @val={0x3, 0x4, 0x0, 0x7fff, 0x1, 0xfff}, @eth={@multicast, @dev={'\xaa\xaa\xaa\xaa\xaa', 0x44}, @void, {@llc={0x4, {@snap={0xab, 0x0, "2cd2", "1be30f", 0x6, "760568a7c9489a76d28c415472a503ec14447aa519834976aa6a90ca718177a41d372192a6be87aa18a2ae24c654d65afef8181817e52ef0095375f5b318621bf7046d9aca554b993b8425bef37f65f4c64921b4062c2bdc039e0f00168fa9ef779b402c54c751099171c5be7f8b1fb190d6fd68d40f5b71b0d887c1ef14b68662c6d284029896a00cc3109b9949e035bee9dc6ebe70adb8980dca4ef05d4e899d9c82fa9fd65e4dee204bae"}}}}}}, 0xcd) (async) syz_emit_ethernet(0xa4, &(0x7f0000000300)={@broadcast, @random='-j<3#d', @val={@void, {0x8100, 0x4, 0x1, 0x4}}, {@mpls_uc={0x8847, {[{0x8000}, {0x6}, {0x76}, {0x7}, {0x4, 0x0, 0x1}, {0x8, 0x0, 0x1}], @generic="e61db26f84d5e131ced0a7580e8e44c539edc7e56c36bf548f27371d14f5bf09fd8dc7887d3f8bc3b581f79601535d69cce6a15fff161d36b89b7099c99d198b10414c25de300d33b096aa0c1aa8ee4e5f41572f8b058310cfaa351d8572444dee6c6f18825d0c99766ffad06bb42ca2f30dde78fe3496700ffd"}}}}, 0x0) (async) bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) (async) [ 2667.562235][T27377] bond1206: (slave bridge1108): making interface the new active one [ 2667.572474][T27377] bridge1108: entered promiscuous mode [ 2667.591994][T27377] bond1206: (slave bridge1108): Enslaving as an active interface with an up link [ 2667.747726][T27378] bond1246: (slave bridge1175): making interface the new active one [ 2667.757097][T27378] bridge1175: entered promiscuous mode [ 2667.781018][T27378] bond1246: (slave bridge1175): Enslaving as an active interface with an up link [ 2667.917132][T27380] bond1131: (slave bridge1060): making interface the new active one [ 2667.939671][T27380] 
bridge1060: entered promiscuous mode [ 2667.981424][T27380] bond1131: (slave bridge1060): Enslaving as an active interface with an up link [ 2668.027493][T27373] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2668.066282][T27383] netlink: 72 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2668.170151][T27387] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.2'. 19:47:42 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 1) 19:47:42 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) ioctl$FS_IOC_FSSETXATTR(0xffffffffffffffff, 0x401c5820, &(0x7f0000000140)={0x20}) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:42 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6d150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:42 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6f150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:42 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x75e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, 
@IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:42 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xea8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:42 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2668.430898][T27427] FAULT_INJECTION: forcing a failure. [ 2668.430898][T27427] name failslab, interval 1, probability 0, space 0, times 0 [ 2668.445415][T27412] bond11: entered promiscuous mode [ 2668.453667][T27412] 8021q: adding VLAN 0 to HW filter on device bond11 [ 2668.533942][T27427] CPU: 1 PID: 27427 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2668.544456][T27427] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2668.554568][T27427] Call Trace: [ 2668.557889][T27427] [ 2668.560863][T27427] dump_stack_lvl+0x125/0x1b0 [ 2668.565629][T27427] should_fail_ex+0x496/0x5b0 [ 2668.570420][T27427] should_failslab+0x9/0x20 [ 2668.575012][T27427] kmem_cache_alloc+0x33a/0x3b0 [ 2668.579950][T27427] ? preempt_count_sub+0x150/0x150 [ 2668.585157][T27427] jbd2__journal_start+0x190/0x690 [ 2668.590374][T27427] __ext4_journal_start_sb+0x40f/0x5c0 [ 2668.595932][T27427] ? ext4_dirty_inode+0xa1/0x130 [ 2668.601055][T27427] ? ext4_setattr+0x29e0/0x29e0 [ 2668.606000][T27427] ext4_dirty_inode+0xa1/0x130 [ 2668.610873][T27427] ? rcu_is_watching+0x12/0xb0 [ 2668.615763][T27427] __mark_inode_dirty+0x1e0/0xd50 [ 2668.620902][T27427] generic_update_time+0x21b/0x2b0 [ 2668.626112][T27427] file_modified_flags+0x2d8/0x330 [ 2668.631287][T27427] ? iunique+0x380/0x380 [ 2668.635590][T27427] ext4_buffered_write_iter+0xf9/0x3c0 [ 2668.641102][T27427] ext4_file_write_iter+0x7ee/0x1950 [ 2668.646450][T27427] ? lock_sync+0x190/0x190 [ 2668.650920][T27427] ? ext4_file_splice_read+0x150/0x150 [ 2668.656439][T27427] vfs_write+0x650/0xe40 [ 2668.660748][T27427] ? kernel_write+0x6c0/0x6c0 [ 2668.665496][T27427] ? __fget_files+0x279/0x410 [ 2668.670247][T27427] ksys_write+0x12f/0x250 [ 2668.674639][T27427] ? __ia32_sys_read+0xb0/0xb0 [ 2668.679477][T27427] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2668.685456][T27427] do_syscall_64+0x38/0xb0 [ 2668.689925][T27427] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2668.695868][T27427] RIP: 0033:0x7f8cd127cae9 [ 2668.700318][T27427] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2668.719970][T27427] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2668.728430][T27427] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2668.736440][T27427] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2668.744449][T27427] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2668.752455][T27427] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2668.760457][T27427] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2668.768517][T27427] 19:47:42 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2668.792142][T27415] bond1132: entered promiscuous mode [ 2668.803883][T27415] 8021q: adding VLAN 0 to HW filter on device bond1132 19:47:42 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 2) [ 2668.938184][T27436] FAULT_INJECTION: forcing a failure. [ 2668.938184][T27436] name fail_page_alloc, interval 1, probability 0, space 0, times 0 [ 2668.953694][T27436] CPU: 0 PID: 27436 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2668.956118][T27416] bond1247: entered promiscuous mode [ 2668.964158][T27436] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2668.964373][T27436] Call Trace: [ 2668.964386][T27436] [ 2668.964399][T27436] dump_stack_lvl+0x125/0x1b0 [ 2668.964450][T27436] should_fail_ex+0x496/0x5b0 [ 2668.964506][T27436] __should_fail_alloc_page+0xe7/0x130 [ 2668.964550][T27436] prepare_alloc_pages.constprop.0+0x16f/0x550 [ 2668.964612][T27436] __alloc_pages+0x14e/0x4a0 [ 2668.964665][T27436] ? __alloc_pages_slowpath.constprop.0+0x2360/0x2360 [ 2668.964746][T27436] __folio_alloc+0x16/0x40 [ 2668.988609][T27416] 8021q: adding VLAN 0 to HW filter on device bond1247 [ 2668.991019][T27436] filemap_alloc_folio+0x154/0x490 [ 2668.991094][T27436] ? folio_wake_bit+0x270/0x270 [ 2669.032707][T27419] validate_nla: 3 callbacks suppressed [ 2669.032732][T27419] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2669.035215][T27436] ? asm_exc_page_fault+0x26/0x30 [ 2669.035270][T27436] ? lockdep_hardirqs_on+0x7d/0x100 [ 2669.035330][T27436] __filemap_get_folio+0x288/0x990 [ 2669.035379][T27436] ext4_da_write_begin+0x3c7/0x8c0 [ 2669.074412][T27436] ? ext4_write_begin+0x1100/0x1100 [ 2669.079711][T27436] generic_perform_write+0x278/0x600 [ 2669.085094][T27436] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2669.090545][T27436] ? iunique+0x380/0x380 [ 2669.094843][T27436] ext4_buffered_write_iter+0x11f/0x3c0 [ 2669.100434][T27436] ext4_file_write_iter+0x7ee/0x1950 [ 2669.105773][T27436] ? lock_sync+0x190/0x190 [ 2669.110232][T27436] ? ext4_file_splice_read+0x150/0x150 [ 2669.115754][T27436] vfs_write+0x650/0xe40 [ 2669.120055][T27436] ? 
kernel_write+0x6c0/0x6c0 [ 2669.124789][T27436] ? __fget_files+0x279/0x410 [ 2669.129527][T27436] ksys_write+0x12f/0x250 [ 2669.133908][T27436] ? __ia32_sys_read+0xb0/0xb0 [ 2669.138720][T27436] ? syscall_enter_from_user_mode+0x26/0x80 [ 2669.144670][T27436] do_syscall_64+0x38/0xb0 [ 2669.149126][T27436] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2669.155063][T27436] RIP: 0033:0x7f8cd127cae9 [ 2669.159507][T27436] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2669.179154][T27436] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2669.187606][T27436] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2669.195604][T27436] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2669.203605][T27436] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2669.211604][T27436] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2669.219603][T27436] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2669.227623][T27436] 19:47:43 executing program 0: socket$nl_route(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:43 executing program 0: socket$nl_route(0x10, 0x3, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:43 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 3) 19:47:43 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2669.269421][T27419] workqueue: Failed to create a rescuer kthread for wq "bond1207": -EINTR [ 2669.512400][T27444] FAULT_INJECTION: forcing a failure. [ 2669.512400][T27444] name failslab, interval 1, probability 0, space 0, times 0 [ 2669.544317][T27444] CPU: 0 PID: 27444 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2669.546664][T27420] bond11: (slave bridge5): making interface the new active one [ 2669.554774][T27444] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2669.556074][T27444] Call Trace: [ 2669.556088][T27444] [ 2669.556101][T27444] dump_stack_lvl+0x125/0x1b0 [ 2669.556154][T27444] should_fail_ex+0x496/0x5b0 [ 2669.556204][T27444] ? __es_tree_search.isra.0+0x210/0x210 [ 2669.556246][T27444] should_failslab+0x9/0x20 [ 2669.556294][T27444] kmem_cache_alloc+0x69/0x3b0 [ 2669.604631][T27444] __es_insert_extent+0x741/0x1470 [ 2669.609819][T27444] ? do_raw_write_lock+0x11e/0x3b0 [ 2669.611758][T27420] bridge5: entered promiscuous mode [ 2669.614982][T27444] ? do_raw_read_unlock+0xe0/0xe0 [ 2669.615052][T27444] ext4_es_insert_extent+0x303/0xe10 [ 2669.615102][T27444] ? ext4_es_scan_clu+0x310/0x310 [ 2669.635740][T27444] ? __ext4_handle_dirty_metadata+0x8e0/0x8e0 [ 2669.641900][T27444] ? __es_find_extent_range+0x1eb/0x4d0 [ 2669.647956][T27444] ? rcu_is_watching+0x12/0xb0 [ 2669.652818][T27444] ext4_ext_map_blocks+0x1a85/0x5b00 [ 2669.658192][T27444] ? lockdep_hardirqs_on_prepare+0x410/0x410 [ 2669.664263][T27444] ? 
print_usage_bug.part.0+0x670/0x670 [ 2669.669893][T27444] ? lockdep_hardirqs_on_prepare+0x410/0x410 [ 2669.671172][T27420] bond11: (slave bridge5): Enslaving as an active interface with an up link [ 2669.675947][T27444] ? ext4_ext_release+0x10/0x10 [ 2669.675999][T27444] ? lock_sync+0x190/0x190 [ 2669.676050][T27444] ? reacquire_held_locks+0x4b0/0x4b0 [ 2669.699477][T27444] ? ext4_es_lookup_extent+0xc7/0xbf0 [ 2669.704947][T27444] ext4_da_get_block_prep+0xd80/0x1340 [ 2669.710499][T27444] ? ext4_dax_writepages+0xb30/0xb30 [ 2669.715874][T27444] ? ext4_block_write_begin+0xc08/0xe30 [ 2669.721501][T27444] ? reacquire_held_locks+0x4b0/0x4b0 [ 2669.726961][T27444] ? __sanitizer_cov_trace_pc+0xb/0x70 [ 2669.732511][T27444] ? folio_flags.constprop.0+0x56/0x150 [ 2669.738159][T27444] ext4_block_write_begin+0x3da/0xe30 [ 2669.743628][T27444] ? ext4_dax_writepages+0xb30/0xb30 [ 2669.749012][T27444] ? mpage_map_and_submit_buffers+0xab0/0xab0 [ 2669.755172][T27444] ? __filemap_get_folio+0x1e7/0x990 [ 2669.760543][T27444] ext4_da_write_begin+0x40a/0x8c0 [ 2669.765768][T27444] ? ext4_write_begin+0x1100/0x1100 [ 2669.771069][T27444] generic_perform_write+0x278/0x600 [ 2669.776462][T27444] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2669.781934][T27444] ? iunique+0x380/0x380 [ 2669.786279][T27444] ext4_buffered_write_iter+0x11f/0x3c0 [ 2669.791915][T27444] ext4_file_write_iter+0x7ee/0x1950 [ 2669.797304][T27444] ? lock_sync+0x190/0x190 [ 2669.801808][T27444] ? ext4_file_splice_read+0x150/0x150 [ 2669.807364][T27444] vfs_write+0x650/0xe40 [ 2669.811716][T27444] ? kernel_write+0x6c0/0x6c0 [ 2669.816493][T27444] ? __fget_files+0x279/0x410 [ 2669.821289][T27444] ksys_write+0x12f/0x250 [ 2669.825712][T27444] ? __ia32_sys_read+0xb0/0xb0 [ 2669.830565][T27444] ? syscall_enter_from_user_mode+0x26/0x80 [ 2669.836574][T27444] do_syscall_64+0x38/0xb0 [ 2669.841099][T27444] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2669.847079][T27444] RIP: 0033:0x7f8cd127cae9 [ 2669.851559][T27444] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2669.871247][T27444] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2669.879729][T27444] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2669.887759][T27444] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2669.895794][T27444] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2669.903830][T27444] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2669.911862][T27444] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2669.919926][T27444] 19:47:43 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:43 executing program 3: r0 = socket$inet6(0xa, 0x6, 0x0) listen(r0, 0x0) setsockopt(r0, 0x8000, 0x1, 
&(0x7f0000000540)="ce3100e83e1721513d744172ecb4749c6ddb32ad4aa73db22f719bc0b382176029ad012ee1cd90a9cc54e0766746b9ef9c56810d2378c1200825f7ad954a6378d79bdeb6a273bab56d4f1c89ab25f39aaa66ea090254c0d72bb6fcfefebc54ac3651328a1cc08b642c6bd405f6d1a43c12a34263a4568ee158b49a821ceb58af353b88b1dc059ec3ca09d4fc614773e936a809c2467a5cb28234f2941623e33fc7d0acbcd42368f184dc85b0f55ab81a2c1fe32ea0d94093e6b401e2ce8d7950d6101fc4da25b2342c37c50aace7f9fd86fab4ede257b02c30cbf7060e1d3c48b2e2f4756712e8839a5b92fd61546eb14cf98b0701619024b7cc100bc3c5e2b2d9333697de12308a21fd75ecb97d5d6697782077837f4475198e7dd2e4b173f4288d8dc284480f12c18be24b983b440774df3464f4b352934163f4ec3a78e252397d8c40f700e68090b6de8fba5f9954d626257b52256e598ccc9c0aa9f7d397d0961e43bc2109448223bf09162c2790e059b65ba7027a7cb0cc34849a90accc46411d56f5d24c8f9fd118a675efbdd8a3d7118e6cd747678072747e5861e7afa9122ca6a61908a925359a9bf022fbf65a6344748208e096c5cb9e1cc192b9e0d2b9cc4f76a1f1ab0dccd1b35252635b81a4e0c71de9b4f6fa66248cef191eb9521622ec0af64c70352d59bf8f8b5dc80c6764eb0c696645ed6641335c606a8be80595fca1cafeaaafc80d5d30a1f6efe589cae1a4f9bb1ed9803fcc1c4582e0776723569508f188c208faeceb670f808e9ffc8893effd3cb4b599c1c4976fdb7884dd285a4e15d30a10a19411f9d1b0940a127dbb7bf5b91879d14bec0a686885d60e63937377accdcd3e88bf1e944073034369ebf9d439b61de3738c2e8c503ee288972938ab47d091439bcb53804922c74e41cfb7f254b5967eaef1d8689474818db302268db4b69cfc20a34bfc9115941c2efa66f0345f2234985845cf31478ded60340e6c0d0199d094ed66cfcc83151d70a505834a5fa74fee9eab0298970a56bdccbcac7effce6286d3620254625bd9696666a9cfa428c26fe273cb2498b923380e745aea217216378d157b97abcb3435d5e15ee55eb1abdcebaaf2cc00054ea506562de7569e8aebb3154209db2c5405ae45352154b90e090a27b0c1638215649a7b72979747b2fa163248bbd4048bdf89eb72cda01b1c6559f4ababe3cba2f190ec0516b68d668ac94a7100e0a64e91e2d8e9913a4a9f233a3313bcce00c6c8b2fb0c96c599697caed036003072467d4b7ccbecaac47d7d14ada5b9c7ae074641b638830b9326380d8b2d3eba99fde0d97180198053a7a61877e2cd2734d7d95004ab73571f2c4bc47fd5af8afde89462af8a1cda361d6a259a32150da06c1343f295221b784dc4dd5c83a570a898c29513232df1744cbae8ba12ac678e027f826f10cf053fc3b391de692587dc2da640ef0b1fb5bf8fd8c947f54e51781aaad7c78d6bb13064fe2e4e067471e1cc6c8ebb4c274d7b2b7904d2cf9520fd185ffcfab56b4fad799e9467efbc29c76e21cd01c060fed97a0f5b80be30c16a002caa88962196ae794c0650e49135aeac46ea4b0ade97f65a82cf4b2be86704fde39ad460ab9935378b8a4ab3a15bca8859b8b037c04b5e5a2e131dabae850e9ba1cb6150d5fd4c588f76e65d3ac3d92cc3c1366f51fddddf134375f3b215c9fd9154501d4de7bfdb66c5acb856d06b9ec3d3243e61913a92b87a51d01491129511cd0ef65aad1dd2e8bda5e454dc5ce23bca36474ab999273e9c46f0f827fd5c9ae923c8dacaee95f006e44c284083bdfde673a36ff239d71bf8610fb0b3753d61b4acfd0db986547d3e981479636ee9c0b05a11ed12011e03a931e79d7ec89e99283b394cf6af01ffdc072631f0f9667fc6a5ca5a93121d5c115f8c88754cff7d9627ebe1d215100824d61869fcf59fd60e086df5d2418a64f6f81de4978453b39c1832607c21e10e9812550615bc09ab29d0f6b565a73e41b23a0b1624702e23d42a48cf07fd5871605bd980b310ed258d3d2ff0f119b1b2832191410665cbbcafc64d26de6368a85eb475d459635ae0693de2589887e21699d8688656963351bedfbb04fbfbb64089b0510087d88fe18d98faa11e2b9ae696635eab03e4a223f103ba52dde0d234c9ca84ab9225de350b693c492d7194ad68e9b1735c185d0d0291e6fb8f1e859c9bce109aae9b0aa0103fc9d743d6f07a1871ae211acf812cb2457d2c665fc5fc5f2dd65745168a34ad4c692bc0436ae9b9efb48ec0f379e9efd675cf9957c6af3a7a851464a2f3406ca147a1a162eeda501a2469e117a0769e9c731f25373fb9b3fc6c8ec564f0e26ee8407fdba89be6e067a875042e1b06a6de3b4563df45d3a9a2f734de9291fc24958b46a9820cb3f21e8b9aeab33d97ae235fc59e6a65b80367b3c7efb2abb507baa5e41c39e637f4899dddd85647f5b5cae97b
8dd0126d604d7ac451dff0eb01f6a006625629ca5e355fd70f227a5b42de6ef8ee77ba10ccf39e22852fa7645a6054b72fbdf415aabd7c9a7490b6328629b7240909d85959203f82defbdb303240f3773521d7ffed6be3a1ad43add7ae079976f415bfae1ffcc68b7efc0b4c62f99036c68d9e62425025fe55a1b1745776263319563470cd0617f5127df0605c934667097173be10492b3889a204a7ec6aae0027f6e02c72649388d7cad750d1af898520443be8ea9b99eb5664fd191ea835362fdda778ee0799986f6aade1ac7038cf58803b5733352b6c81dd0f282310765d5cb98b318a0a88888f8a8a4c9f1c18320729cf5cb1204c4a4b436648ce3a1b2206fe477f4a4990b299e20270c5ab7f4f23cc11c5a86c649e347f03c41649c5a804bbc78a2537b612e8fc48079872ce5f129f2a3e325e34a48850eb572ba886331b986552ca2091d6fecf701f6f688150abec1f7231c2a79518114c990fed44ee72ec6161b03f6c6a7a3d253b6fa9a8b8a28ffc55117ea8c96f2120fc4f1c4c6e1798c1682cc7fcb82333d7bdde94bf54a4ae039c597f79740d6f8bf2f2428ed5858e0d0e8d20e4d9d4934215e7b541f2d71870f89e2186ae5759f78a964edbefe5e6409b16c1f2df02e198ab6587d4f57999c66167a295598ce8549cb72140d2261e2f75d1b94a900f751d7cc96ab118c04a93dc4cae74699f2f0128e8123c3e6a46e282591eae11ae3c26d4310eb9f6caa2e427e6f2bf94e0b5e8a84f01f0e22e69a0bb679fa04231c67c285aa1906e2b106eb9a8b1d2a1c62593f62bf7376dc978582da6e62524573306f312d08ca36c01d059db68fe6a2c45c525e959e40f20be5980bfac86f87509fd4d90aff4a7459cb7c0960f167b8df4986719dea196305ca0c849b0a02ef917857a5da4d47c2b0bae6c26a9627148da655ee696b8c95e7c075dbd9801586fe99fbe33e5bac4f65d8dfb7a70c2911d816af01c802c360897c690f564ac05aa62a559842685a27c834883ec3bbf4881b356ef218cbaee4cad0994677e97a7d67c49b030fe8949439add40bffdf2d332141d0ae6786bd90fa0770641e0374c337780d2d871975c82a52fcc1df3090576ac56457d6644e9d4711ad22e808b2eff8e96951988d0496369c0d3f1920b222041ee9f38d1ffec28b742f9c33039612c51723312442a3e9698d4094df3307168bd7cd42e2df293a1b3801aa36b1a974c2fb68bd069a907db328301ec234bbb7879ce13975557f8620c26dcf74d22fe77d5fc30dae73db3365a9bd3267e54287fe56fb123e4d7ef14905d798cb77f6041b8f9d5c767e764b57eee391f642e1a6abc83aeab77d42db058be3d3ae6db3dce42bdda99c538cec504edd80f9e2e1b26cb5ff576d1949c3a3dec754be8f3e37b59703484e1e15f9c504a6313e0f2a517626f3bf3bed30fd851a86a60a3d7c667742904c0286a42f17f991527c88e8ea52a011621b4617d43c340458d10d20be60e57bbcd515cbe829ad28275c4d91717532b4eb65a5af76793df928d3b231b973ebdb80aeae9bf36bb938ea8379ad8912d7a5b454e829485781b2d27fc05dfae8f92ade2e823bc3686db670d63fffb4c35d7058cc14cd7707cedeba18d657c81f4a128b527c48309d1c5cf8ec9eee9240bcb27281154072b8e8842b94ca2ac5c85d88181af8744f0e4ea38e18a22aca606dee32fac7607220ffeaee5a65ca4236709135ea01f1aca6718554d5162683166aad6b83b5c692b66cd3688afcc2733ac45bc9a4b4dbfa7e9aa121ead5a3cbde1b857ca3caa40dca3ad07fb1539628eb620a221ae82ca6c654c23db2334f448d1644684ec5e73d9b7108e7ad4cd39f14b84863da656dd018b472d814d5854bf3cfe79488740a86c8c2f39010d6cb09461035b4d3fa39e9144d39e8f7f0e552fb448a43075c3f860adc2d56edf83ce4f120052cdddab0539b444392030017b8b04d40f1327d1f72e515909e79992fe8ae3caf18d5d0bb041304fde7eacaf85bd7093daf9b413ec3a4bca8900c82b1a3a973fde6b5350eba00098d635eabd0b69d95a07ae744329f771ff8ea4fc226edb5c91af4dd8b3523aa510ead13161d41df17ba169b6988126f04410a27050da3d2d9784bef5ef93c4c739f4b4ddd84c0ca25d68a0713a6250056336ca9e0d9bf5b7b680beb1a05fbecadccd6b2a2fa31e1265cc1af5db33222aa38709b760033705acfe0cbdbb2477c68f1f2ca8eff3045141fb57d143911bed0192157530d4ad3071825a2ae7dcf8b6ef431af4b28312039cf719204e7bef3aaf3442664575c959b808cc9bb7b478f9c3ea9dfc6e29f9512922062e5aeec97af9d249f33ce88ae3bfa16170e6549200d3cecec0b0a952b532814be854724321c2dd1aa1c847c37b289ec4e3744aa8cdda72c19aa0bd48b5f2842ceee7d563bb2ebfd33ac96c9afb863defd23179afd2d80cf1a3f86fa4b3611e7199af23ce2890b27736ade2c8bf1a30c82d39a253e
cbfd2da905974e60855c09613fd1b224c09f240578935d1e523df1828b62b463b3e45c1ed4ab3a1d39ae52c35b9c36ebd79c273c8ce530a2825c36c390fa59b3b7ff1d86c1f9269e2db643a6926e68a44fd18cff9d0239f4a0ddb46a591eb7ac5001f3f70b1eeb566873cbf056ac380a6beeb585356413933a6bda59beabbe13c5d8b57775b8d472e9f3df52b0e916c88883dbe6b3560747ed866c85c0b5b7a10a77a20a247fb25895c0829a21853e0c23ad4db3a71450b56107fc1ef86e6b616c0525c29f6f7fc0c20591388dbd4f86ba348524569f0d04807bf62d8f3218aa8f54ff0d4b654706e0b9a64c1ca5fca08dcc0a583a282536fa5ef7b7c49f1541e070f5f27bc6b535c9913e5dc1a73ee1d75c09b05ab930b25e6c013e7fe9d1bba233fdcfe62f20f2cecd1535c51ad9c46c9e241e35b26103383e7bddf88f4697fa84af3d59cec4586c98f7a05bb3696df4b226129465cbf01679a5a29fceaadc338b1f5a845dcc5fdb3ae4d8107e1f0defccbbd4d0ba6a56be646ee6065288e5d73291588f775539fffc0b381cf0de585c36742b87a9392f32dacfbb0508657fca7b4e6ae5e39b1b6792f370cee27497ba2bdef5076f52c970cb732107e1009cf0655c88ebd8349a74d2aa00f6b7621654a02b8a19ec050b515edf4cb3d54cdbb554045c06e6242f96a0eedbdc7bd9c847a3f422cbd403830106d0a4b155d609ed7aa81562df53885f0f0d8d0ee8d1217fb21b0276f80e77bfa5ef65926c341c890375b3bc3f3", 0x1000) r1 = bpf$MAP_CREATE(0x0, &(0x7f00000004c0)=@bloom_filter={0x1e, 0x0, 0x8, 0x4, 0x0, 0x1}, 0x48) ppoll(&(0x7f00000000c0)=[{r0}, {r1}], 0x2, 0x0, 0x0, 0x0) [ 2669.942934][T27421] bond1132: (slave bridge1061): making interface the new active one [ 2669.979338][T27421] bridge1061: entered promiscuous mode [ 2670.021799][T27421] bond1132: (slave bridge1061): Enslaving as an active interface with an up link [ 2670.129932][T27423] bond1247: (slave bridge1176): making interface the new active one [ 2670.140219][T27423] bridge1176: entered promiscuous mode [ 2670.155720][T27423] bond1247: (slave bridge1176): Enslaving as an active interface with an up link 19:47:44 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 4) 19:47:44 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:44 executing program 3: r0 = socket$inet6(0xa, 0x6, 0x0) listen(r0, 0x0) setsockopt(r0, 0x8000, 0x1, 
&(0x7f0000000540)="ce3100e83e1721513d744172ecb4749c6ddb32ad4aa73db22f719bc0b382176029ad012ee1cd90a9cc54e0766746b9ef9c56810d2378c1200825f7ad954a6378d79bdeb6a273bab56d4f1c89ab25f39aaa66ea090254c0d72bb6fcfefebc54ac3651328a1cc08b642c6bd405f6d1a43c12a34263a4568ee158b49a821ceb58af353b88b1dc059ec3ca09d4fc614773e936a809c2467a5cb28234f2941623e33fc7d0acbcd42368f184dc85b0f55ab81a2c1fe32ea0d94093e6b401e2ce8d7950d6101fc4da25b2342c37c50aace7f9fd86fab4ede257b02c30cbf7060e1d3c48b2e2f4756712e8839a5b92fd61546eb14cf98b0701619024b7cc100bc3c5e2b2d9333697de12308a21fd75ecb97d5d6697782077837f4475198e7dd2e4b173f4288d8dc284480f12c18be24b983b440774df3464f4b352934163f4ec3a78e252397d8c40f700e68090b6de8fba5f9954d626257b52256e598ccc9c0aa9f7d397d0961e43bc2109448223bf09162c2790e059b65ba7027a7cb0cc34849a90accc46411d56f5d24c8f9fd118a675efbdd8a3d7118e6cd747678072747e5861e7afa9122ca6a61908a925359a9bf022fbf65a6344748208e096c5cb9e1cc192b9e0d2b9cc4f76a1f1ab0dccd1b35252635b81a4e0c71de9b4f6fa66248cef191eb9521622ec0af64c70352d59bf8f8b5dc80c6764eb0c696645ed6641335c606a8be80595fca1cafeaaafc80d5d30a1f6efe589cae1a4f9bb1ed9803fcc1c4582e0776723569508f188c208faeceb670f808e9ffc8893effd3cb4b599c1c4976fdb7884dd285a4e15d30a10a19411f9d1b0940a127dbb7bf5b91879d14bec0a686885d60e63937377accdcd3e88bf1e944073034369ebf9d439b61de3738c2e8c503ee288972938ab47d091439bcb53804922c74e41cfb7f254b5967eaef1d8689474818db302268db4b69cfc20a34bfc9115941c2efa66f0345f2234985845cf31478ded60340e6c0d0199d094ed66cfcc83151d70a505834a5fa74fee9eab0298970a56bdccbcac7effce6286d3620254625bd9696666a9cfa428c26fe273cb2498b923380e745aea217216378d157b97abcb3435d5e15ee55eb1abdcebaaf2cc00054ea506562de7569e8aebb3154209db2c5405ae45352154b90e090a27b0c1638215649a7b72979747b2fa163248bbd4048bdf89eb72cda01b1c6559f4ababe3cba2f190ec0516b68d668ac94a7100e0a64e91e2d8e9913a4a9f233a3313bcce00c6c8b2fb0c96c599697caed036003072467d4b7ccbecaac47d7d14ada5b9c7ae074641b638830b9326380d8b2d3eba99fde0d97180198053a7a61877e2cd2734d7d95004ab73571f2c4bc47fd5af8afde89462af8a1cda361d6a259a32150da06c1343f295221b784dc4dd5c83a570a898c29513232df1744cbae8ba12ac678e027f826f10cf053fc3b391de692587dc2da640ef0b1fb5bf8fd8c947f54e51781aaad7c78d6bb13064fe2e4e067471e1cc6c8ebb4c274d7b2b7904d2cf9520fd185ffcfab56b4fad799e9467efbc29c76e21cd01c060fed97a0f5b80be30c16a002caa88962196ae794c0650e49135aeac46ea4b0ade97f65a82cf4b2be86704fde39ad460ab9935378b8a4ab3a15bca8859b8b037c04b5e5a2e131dabae850e9ba1cb6150d5fd4c588f76e65d3ac3d92cc3c1366f51fddddf134375f3b215c9fd9154501d4de7bfdb66c5acb856d06b9ec3d3243e61913a92b87a51d01491129511cd0ef65aad1dd2e8bda5e454dc5ce23bca36474ab999273e9c46f0f827fd5c9ae923c8dacaee95f006e44c284083bdfde673a36ff239d71bf8610fb0b3753d61b4acfd0db986547d3e981479636ee9c0b05a11ed12011e03a931e79d7ec89e99283b394cf6af01ffdc072631f0f9667fc6a5ca5a93121d5c115f8c88754cff7d9627ebe1d215100824d61869fcf59fd60e086df5d2418a64f6f81de4978453b39c1832607c21e10e9812550615bc09ab29d0f6b565a73e41b23a0b1624702e23d42a48cf07fd5871605bd980b310ed258d3d2ff0f119b1b2832191410665cbbcafc64d26de6368a85eb475d459635ae0693de2589887e21699d8688656963351bedfbb04fbfbb64089b0510087d88fe18d98faa11e2b9ae696635eab03e4a223f103ba52dde0d234c9ca84ab9225de350b693c492d7194ad68e9b1735c185d0d0291e6fb8f1e859c9bce109aae9b0aa0103fc9d743d6f07a1871ae211acf812cb2457d2c665fc5fc5f2dd65745168a34ad4c692bc0436ae9b9efb48ec0f379e9efd675cf9957c6af3a7a851464a2f3406ca147a1a162eeda501a2469e117a0769e9c731f25373fb9b3fc6c8ec564f0e26ee8407fdba89be6e067a875042e1b06a6de3b4563df45d3a9a2f734de9291fc24958b46a9820cb3f21e8b9aeab33d97ae235fc59e6a65b80367b3c7efb2abb507baa5e41c39e637f4899dddd85647f5b5cae97b
8dd0126d604d7ac451dff0eb01f6a006625629ca5e355fd70f227a5b42de6ef8ee77ba10ccf39e22852fa7645a6054b72fbdf415aabd7c9a7490b6328629b7240909d85959203f82defbdb303240f3773521d7ffed6be3a1ad43add7ae079976f415bfae1ffcc68b7efc0b4c62f99036c68d9e62425025fe55a1b1745776263319563470cd0617f5127df0605c934667097173be10492b3889a204a7ec6aae0027f6e02c72649388d7cad750d1af898520443be8ea9b99eb5664fd191ea835362fdda778ee0799986f6aade1ac7038cf58803b5733352b6c81dd0f282310765d5cb98b318a0a88888f8a8a4c9f1c18320729cf5cb1204c4a4b436648ce3a1b2206fe477f4a4990b299e20270c5ab7f4f23cc11c5a86c649e347f03c41649c5a804bbc78a2537b612e8fc48079872ce5f129f2a3e325e34a48850eb572ba886331b986552ca2091d6fecf701f6f688150abec1f7231c2a79518114c990fed44ee72ec6161b03f6c6a7a3d253b6fa9a8b8a28ffc55117ea8c96f2120fc4f1c4c6e1798c1682cc7fcb82333d7bdde94bf54a4ae039c597f79740d6f8bf2f2428ed5858e0d0e8d20e4d9d4934215e7b541f2d71870f89e2186ae5759f78a964edbefe5e6409b16c1f2df02e198ab6587d4f57999c66167a295598ce8549cb72140d2261e2f75d1b94a900f751d7cc96ab118c04a93dc4cae74699f2f0128e8123c3e6a46e282591eae11ae3c26d4310eb9f6caa2e427e6f2bf94e0b5e8a84f01f0e22e69a0bb679fa04231c67c285aa1906e2b106eb9a8b1d2a1c62593f62bf7376dc978582da6e62524573306f312d08ca36c01d059db68fe6a2c45c525e959e40f20be5980bfac86f87509fd4d90aff4a7459cb7c0960f167b8df4986719dea196305ca0c849b0a02ef917857a5da4d47c2b0bae6c26a9627148da655ee696b8c95e7c075dbd9801586fe99fbe33e5bac4f65d8dfb7a70c2911d816af01c802c360897c690f564ac05aa62a559842685a27c834883ec3bbf4881b356ef218cbaee4cad0994677e97a7d67c49b030fe8949439add40bffdf2d332141d0ae6786bd90fa0770641e0374c337780d2d871975c82a52fcc1df3090576ac56457d6644e9d4711ad22e808b2eff8e96951988d0496369c0d3f1920b222041ee9f38d1ffec28b742f9c33039612c51723312442a3e9698d4094df3307168bd7cd42e2df293a1b3801aa36b1a974c2fb68bd069a907db328301ec234bbb7879ce13975557f8620c26dcf74d22fe77d5fc30dae73db3365a9bd3267e54287fe56fb123e4d7ef14905d798cb77f6041b8f9d5c767e764b57eee391f642e1a6abc83aeab77d42db058be3d3ae6db3dce42bdda99c538cec504edd80f9e2e1b26cb5ff576d1949c3a3dec754be8f3e37b59703484e1e15f9c504a6313e0f2a517626f3bf3bed30fd851a86a60a3d7c667742904c0286a42f17f991527c88e8ea52a011621b4617d43c340458d10d20be60e57bbcd515cbe829ad28275c4d91717532b4eb65a5af76793df928d3b231b973ebdb80aeae9bf36bb938ea8379ad8912d7a5b454e829485781b2d27fc05dfae8f92ade2e823bc3686db670d63fffb4c35d7058cc14cd7707cedeba18d657c81f4a128b527c48309d1c5cf8ec9eee9240bcb27281154072b8e8842b94ca2ac5c85d88181af8744f0e4ea38e18a22aca606dee32fac7607220ffeaee5a65ca4236709135ea01f1aca6718554d5162683166aad6b83b5c692b66cd3688afcc2733ac45bc9a4b4dbfa7e9aa121ead5a3cbde1b857ca3caa40dca3ad07fb1539628eb620a221ae82ca6c654c23db2334f448d1644684ec5e73d9b7108e7ad4cd39f14b84863da656dd018b472d814d5854bf3cfe79488740a86c8c2f39010d6cb09461035b4d3fa39e9144d39e8f7f0e552fb448a43075c3f860adc2d56edf83ce4f120052cdddab0539b444392030017b8b04d40f1327d1f72e515909e79992fe8ae3caf18d5d0bb041304fde7eacaf85bd7093daf9b413ec3a4bca8900c82b1a3a973fde6b5350eba00098d635eabd0b69d95a07ae744329f771ff8ea4fc226edb5c91af4dd8b3523aa510ead13161d41df17ba169b6988126f04410a27050da3d2d9784bef5ef93c4c739f4b4ddd84c0ca25d68a0713a6250056336ca9e0d9bf5b7b680beb1a05fbecadccd6b2a2fa31e1265cc1af5db33222aa38709b760033705acfe0cbdbb2477c68f1f2ca8eff3045141fb57d143911bed0192157530d4ad3071825a2ae7dcf8b6ef431af4b28312039cf719204e7bef3aaf3442664575c959b808cc9bb7b478f9c3ea9dfc6e29f9512922062e5aeec97af9d249f33ce88ae3bfa16170e6549200d3cecec0b0a952b532814be854724321c2dd1aa1c847c37b289ec4e3744aa8cdda72c19aa0bd48b5f2842ceee7d563bb2ebfd33ac96c9afb863defd23179afd2d80cf1a3f86fa4b3611e7199af23ce2890b27736ade2c8bf1a30c82d39a253e
cbfd2da905974e60855c09613fd1b224c09f240578935d1e523df1828b62b463b3e45c1ed4ab3a1d39ae52c35b9c36ebd79c273c8ce530a2825c36c390fa59b3b7ff1d86c1f9269e2db643a6926e68a44fd18cff9d0239f4a0ddb46a591eb7ac5001f3f70b1eeb566873cbf056ac380a6beeb585356413933a6bda59beabbe13c5d8b57775b8d472e9f3df52b0e916c88883dbe6b3560747ed866c85c0b5b7a10a77a20a247fb25895c0829a21853e0c23ad4db3a71450b56107fc1ef86e6b616c0525c29f6f7fc0c20591388dbd4f86ba348524569f0d04807bf62d8f3218aa8f54ff0d4b654706e0b9a64c1ca5fca08dcc0a583a282536fa5ef7b7c49f1541e070f5f27bc6b535c9913e5dc1a73ee1d75c09b05ab930b25e6c013e7fe9d1bba233fdcfe62f20f2cecd1535c51ad9c46c9e241e35b26103383e7bddf88f4697fa84af3d59cec4586c98f7a05bb3696df4b226129465cbf01679a5a29fceaadc338b1f5a845dcc5fdb3ae4d8107e1f0defccbbd4d0ba6a56be646ee6065288e5d73291588f775539fffc0b381cf0de585c36742b87a9392f32dacfbb0508657fca7b4e6ae5e39b1b6792f370cee27497ba2bdef5076f52c970cb732107e1009cf0655c88ebd8349a74d2aa00f6b7621654a02b8a19ec050b515edf4cb3d54cdbb554045c06e6242f96a0eedbdc7bd9c847a3f422cbd403830106d0a4b155d609ed7aa81562df53885f0f0d8d0ee8d1217fb21b0276f80e77bfa5ef65926c341c890375b3bc3f3", 0x1000) r1 = bpf$MAP_CREATE(0x0, &(0x7f00000004c0)=@bloom_filter={0x1e, 0x0, 0x8, 0x4, 0x0, 0x1}, 0x48) ppoll(&(0x7f00000000c0)=[{r0}, {r1}], 0x2, 0x0, 0x0, 0x0) socket$inet6(0xa, 0x6, 0x0) (async) listen(r0, 0x0) (async) setsockopt(r0, 0x8000, 0x1, &(0x7f0000000540)="ce3100e83e1721513d744172ecb4749c6ddb32ad4aa73db22f719bc0b382176029ad012ee1cd90a9cc54e0766746b9ef9c56810d2378c1200825f7ad954a6378d79bdeb6a273bab56d4f1c89ab25f39aaa66ea090254c0d72bb6fcfefebc54ac3651328a1cc08b642c6bd405f6d1a43c12a34263a4568ee158b49a821ceb58af353b88b1dc059ec3ca09d4fc614773e936a809c2467a5cb28234f2941623e33fc7d0acbcd42368f184dc85b0f55ab81a2c1fe32ea0d94093e6b401e2ce8d7950d6101fc4da25b2342c37c50aace7f9fd86fab4ede257b02c30cbf7060e1d3c48b2e2f4756712e8839a5b92fd61546eb14cf98b0701619024b7cc100bc3c5e2b2d9333697de12308a21fd75ecb97d5d6697782077837f4475198e7dd2e4b173f4288d8dc284480f12c18be24b983b440774df3464f4b352934163f4ec3a78e252397d8c40f700e68090b6de8fba5f9954d626257b52256e598ccc9c0aa9f7d397d0961e43bc2109448223bf09162c2790e059b65ba7027a7cb0cc34849a90accc46411d56f5d24c8f9fd118a675efbdd8a3d7118e6cd747678072747e5861e7afa9122ca6a61908a925359a9bf022fbf65a6344748208e096c5cb9e1cc192b9e0d2b9cc4f76a1f1ab0dccd1b35252635b81a4e0c71de9b4f6fa66248cef191eb9521622ec0af64c70352d59bf8f8b5dc80c6764eb0c696645ed6641335c606a8be80595fca1cafeaaafc80d5d30a1f6efe589cae1a4f9bb1ed9803fcc1c4582e0776723569508f188c208faeceb670f808e9ffc8893effd3cb4b599c1c4976fdb7884dd285a4e15d30a10a19411f9d1b0940a127dbb7bf5b91879d14bec0a686885d60e63937377accdcd3e88bf1e944073034369ebf9d439b61de3738c2e8c503ee288972938ab47d091439bcb53804922c74e41cfb7f254b5967eaef1d8689474818db302268db4b69cfc20a34bfc9115941c2efa66f0345f2234985845cf31478ded60340e6c0d0199d094ed66cfcc83151d70a505834a5fa74fee9eab0298970a56bdccbcac7effce6286d3620254625bd9696666a9cfa428c26fe273cb2498b923380e745aea217216378d157b97abcb3435d5e15ee55eb1abdcebaaf2cc00054ea506562de7569e8aebb3154209db2c5405ae45352154b90e090a27b0c1638215649a7b72979747b2fa163248bbd4048bdf89eb72cda01b1c6559f4ababe3cba2f190ec0516b68d668ac94a7100e0a64e91e2d8e9913a4a9f233a3313bcce00c6c8b2fb0c96c599697caed036003072467d4b7ccbecaac47d7d14ada5b9c7ae074641b638830b9326380d8b2d3eba99fde0d97180198053a7a61877e2cd2734d7d95004ab73571f2c4bc47fd5af8afde89462af8a1cda361d6a259a32150da06c1343f295221b784dc4dd5c83a570a898c29513232df1744cbae8ba12ac678e027f826f10cf053fc3b391de692587dc2da640ef0b1fb5bf8fd8c947f54e51781aaad7c78d6bb13064fe2e4e067471e1cc6c8ebb4c274d7b2b7904d2cf9520fd18
5ffcfab56b4fad799e9467efbc29c76e21cd01c060fed97a0f5b80be30c16a002caa88962196ae794c0650e49135aeac46ea4b0ade97f65a82cf4b2be86704fde39ad460ab9935378b8a4ab3a15bca8859b8b037c04b5e5a2e131dabae850e9ba1cb6150d5fd4c588f76e65d3ac3d92cc3c1366f51fddddf134375f3b215c9fd9154501d4de7bfdb66c5acb856d06b9ec3d3243e61913a92b87a51d01491129511cd0ef65aad1dd2e8bda5e454dc5ce23bca36474ab999273e9c46f0f827fd5c9ae923c8dacaee95f006e44c284083bdfde673a36ff239d71bf8610fb0b3753d61b4acfd0db986547d3e981479636ee9c0b05a11ed12011e03a931e79d7ec89e99283b394cf6af01ffdc072631f0f9667fc6a5ca5a93121d5c115f8c88754cff7d9627ebe1d215100824d61869fcf59fd60e086df5d2418a64f6f81de4978453b39c1832607c21e10e9812550615bc09ab29d0f6b565a73e41b23a0b1624702e23d42a48cf07fd5871605bd980b310ed258d3d2ff0f119b1b2832191410665cbbcafc64d26de6368a85eb475d459635ae0693de2589887e21699d8688656963351bedfbb04fbfbb64089b0510087d88fe18d98faa11e2b9ae696635eab03e4a223f103ba52dde0d234c9ca84ab9225de350b693c492d7194ad68e9b1735c185d0d0291e6fb8f1e859c9bce109aae9b0aa0103fc9d743d6f07a1871ae211acf812cb2457d2c665fc5fc5f2dd65745168a34ad4c692bc0436ae9b9efb48ec0f379e9efd675cf9957c6af3a7a851464a2f3406ca147a1a162eeda501a2469e117a0769e9c731f25373fb9b3fc6c8ec564f0e26ee8407fdba89be6e067a875042e1b06a6de3b4563df45d3a9a2f734de9291fc24958b46a9820cb3f21e8b9aeab33d97ae235fc59e6a65b80367b3c7efb2abb507baa5e41c39e637f4899dddd85647f5b5cae97b8dd0126d604d7ac451dff0eb01f6a006625629ca5e355fd70f227a5b42de6ef8ee77ba10ccf39e22852fa7645a6054b72fbdf415aabd7c9a7490b6328629b7240909d85959203f82defbdb303240f3773521d7ffed6be3a1ad43add7ae079976f415bfae1ffcc68b7efc0b4c62f99036c68d9e62425025fe55a1b1745776263319563470cd0617f5127df0605c934667097173be10492b3889a204a7ec6aae0027f6e02c72649388d7cad750d1af898520443be8ea9b99eb5664fd191ea835362fdda778ee0799986f6aade1ac7038cf58803b5733352b6c81dd0f282310765d5cb98b318a0a88888f8a8a4c9f1c18320729cf5cb1204c4a4b436648ce3a1b2206fe477f4a4990b299e20270c5ab7f4f23cc11c5a86c649e347f03c41649c5a804bbc78a2537b612e8fc48079872ce5f129f2a3e325e34a48850eb572ba886331b986552ca2091d6fecf701f6f688150abec1f7231c2a79518114c990fed44ee72ec6161b03f6c6a7a3d253b6fa9a8b8a28ffc55117ea8c96f2120fc4f1c4c6e1798c1682cc7fcb82333d7bdde94bf54a4ae039c597f79740d6f8bf2f2428ed5858e0d0e8d20e4d9d4934215e7b541f2d71870f89e2186ae5759f78a964edbefe5e6409b16c1f2df02e198ab6587d4f57999c66167a295598ce8549cb72140d2261e2f75d1b94a900f751d7cc96ab118c04a93dc4cae74699f2f0128e8123c3e6a46e282591eae11ae3c26d4310eb9f6caa2e427e6f2bf94e0b5e8a84f01f0e22e69a0bb679fa04231c67c285aa1906e2b106eb9a8b1d2a1c62593f62bf7376dc978582da6e62524573306f312d08ca36c01d059db68fe6a2c45c525e959e40f20be5980bfac86f87509fd4d90aff4a7459cb7c0960f167b8df4986719dea196305ca0c849b0a02ef917857a5da4d47c2b0bae6c26a9627148da655ee696b8c95e7c075dbd9801586fe99fbe33e5bac4f65d8dfb7a70c2911d816af01c802c360897c690f564ac05aa62a559842685a27c834883ec3bbf4881b356ef218cbaee4cad0994677e97a7d67c49b030fe8949439add40bffdf2d332141d0ae6786bd90fa0770641e0374c337780d2d871975c82a52fcc1df3090576ac56457d6644e9d4711ad22e808b2eff8e96951988d0496369c0d3f1920b222041ee9f38d1ffec28b742f9c33039612c51723312442a3e9698d4094df3307168bd7cd42e2df293a1b3801aa36b1a974c2fb68bd069a907db328301ec234bbb7879ce13975557f8620c26dcf74d22fe77d5fc30dae73db3365a9bd3267e54287fe56fb123e4d7ef14905d798cb77f6041b8f9d5c767e764b57eee391f642e1a6abc83aeab77d42db058be3d3ae6db3dce42bdda99c538cec504edd80f9e2e1b26cb5ff576d1949c3a3dec754be8f3e37b59703484e1e15f9c504a6313e0f2a517626f3bf3bed30fd851a86a60a3d7c667742904c0286a42f17f991527c88e8ea52a011621b4617d43c340458d10d20be60e57bbcd515cbe829ad28275c4d91717532b4eb65a5af76793df928d3b231b973ebdb80
aeae9bf36bb938ea8379ad8912d7a5b454e829485781b2d27fc05dfae8f92ade2e823bc3686db670d63fffb4c35d7058cc14cd7707cedeba18d657c81f4a128b527c48309d1c5cf8ec9eee9240bcb27281154072b8e8842b94ca2ac5c85d88181af8744f0e4ea38e18a22aca606dee32fac7607220ffeaee5a65ca4236709135ea01f1aca6718554d5162683166aad6b83b5c692b66cd3688afcc2733ac45bc9a4b4dbfa7e9aa121ead5a3cbde1b857ca3caa40dca3ad07fb1539628eb620a221ae82ca6c654c23db2334f448d1644684ec5e73d9b7108e7ad4cd39f14b84863da656dd018b472d814d5854bf3cfe79488740a86c8c2f39010d6cb09461035b4d3fa39e9144d39e8f7f0e552fb448a43075c3f860adc2d56edf83ce4f120052cdddab0539b444392030017b8b04d40f1327d1f72e515909e79992fe8ae3caf18d5d0bb041304fde7eacaf85bd7093daf9b413ec3a4bca8900c82b1a3a973fde6b5350eba00098d635eabd0b69d95a07ae744329f771ff8ea4fc226edb5c91af4dd8b3523aa510ead13161d41df17ba169b6988126f04410a27050da3d2d9784bef5ef93c4c739f4b4ddd84c0ca25d68a0713a6250056336ca9e0d9bf5b7b680beb1a05fbecadccd6b2a2fa31e1265cc1af5db33222aa38709b760033705acfe0cbdbb2477c68f1f2ca8eff3045141fb57d143911bed0192157530d4ad3071825a2ae7dcf8b6ef431af4b28312039cf719204e7bef3aaf3442664575c959b808cc9bb7b478f9c3ea9dfc6e29f9512922062e5aeec97af9d249f33ce88ae3bfa16170e6549200d3cecec0b0a952b532814be854724321c2dd1aa1c847c37b289ec4e3744aa8cdda72c19aa0bd48b5f2842ceee7d563bb2ebfd33ac96c9afb863defd23179afd2d80cf1a3f86fa4b3611e7199af23ce2890b27736ade2c8bf1a30c82d39a253ecbfd2da905974e60855c09613fd1b224c09f240578935d1e523df1828b62b463b3e45c1ed4ab3a1d39ae52c35b9c36ebd79c273c8ce530a2825c36c390fa59b3b7ff1d86c1f9269e2db643a6926e68a44fd18cff9d0239f4a0ddb46a591eb7ac5001f3f70b1eeb566873cbf056ac380a6beeb585356413933a6bda59beabbe13c5d8b57775b8d472e9f3df52b0e916c88883dbe6b3560747ed866c85c0b5b7a10a77a20a247fb25895c0829a21853e0c23ad4db3a71450b56107fc1ef86e6b616c0525c29f6f7fc0c20591388dbd4f86ba348524569f0d04807bf62d8f3218aa8f54ff0d4b654706e0b9a64c1ca5fca08dcc0a583a282536fa5ef7b7c49f1541e070f5f27bc6b535c9913e5dc1a73ee1d75c09b05ab930b25e6c013e7fe9d1bba233fdcfe62f20f2cecd1535c51ad9c46c9e241e35b26103383e7bddf88f4697fa84af3d59cec4586c98f7a05bb3696df4b226129465cbf01679a5a29fceaadc338b1f5a845dcc5fdb3ae4d8107e1f0defccbbd4d0ba6a56be646ee6065288e5d73291588f775539fffc0b381cf0de585c36742b87a9392f32dacfbb0508657fca7b4e6ae5e39b1b6792f370cee27497ba2bdef5076f52c970cb732107e1009cf0655c88ebd8349a74d2aa00f6b7621654a02b8a19ec050b515edf4cb3d54cdbb554045c06e6242f96a0eedbdc7bd9c847a3f422cbd403830106d0a4b155d609ed7aa81562df53885f0f0d8d0ee8d1217fb21b0276f80e77bfa5ef65926c341c890375b3bc3f3", 0x1000) (async) bpf$MAP_CREATE(0x0, &(0x7f00000004c0)=@bloom_filter={0x1e, 0x0, 0x8, 0x4, 0x0, 0x1}, 0x48) (async) ppoll(&(0x7f00000000c0)=[{r0}, {r1}], 0x2, 0x0, 0x0, 0x0) (async) 19:47:44 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xeb8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:44 executing program 1: r0 = socket$netlink(0x10, 0x3, 
0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x76e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:44 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x70150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2670.421319][T27457] FAULT_INJECTION: forcing a failure. [ 2670.421319][T27457] name failslab, interval 1, probability 0, space 0, times 0 [ 2670.434546][T27457] CPU: 1 PID: 27457 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2670.445042][T27457] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2670.455163][T27457] Call Trace: [ 2670.458489][T27457] [ 2670.461474][T27457] dump_stack_lvl+0x125/0x1b0 [ 2670.466230][T27457] should_fail_ex+0x496/0x5b0 [ 2670.470980][T27457] ? __es_tree_search.isra.0+0x210/0x210 [ 2670.476681][T27457] should_failslab+0x9/0x20 [ 2670.481261][T27457] kmem_cache_alloc+0x69/0x3b0 [ 2670.486107][T27457] __es_insert_extent+0x741/0x1470 [ 2670.491303][T27457] ext4_es_insert_delayed_block+0x2ac/0x610 [ 2670.497280][T27457] ? ext4_is_pending+0x200/0x200 [ 2670.502285][T27457] ? percpu_counter_add_batch+0x132/0x1f0 [ 2670.508092][T27457] ? do_raw_spin_unlock+0x173/0x230 [ 2670.513383][T27457] ? _raw_spin_unlock+0x28/0x40 [ 2670.518326][T27457] ext4_da_get_block_prep+0x850/0x1340 [ 2670.523882][T27457] ? ext4_dax_writepages+0xb30/0xb30 [ 2670.529247][T27457] ? ext4_block_write_begin+0xc08/0xe30 [ 2670.534855][T27457] ? reacquire_held_locks+0x4b0/0x4b0 [ 2670.540297][T27457] ? folio_flags.constprop.0+0x56/0x150 [ 2670.546264][T27457] ext4_block_write_begin+0x3da/0xe30 [ 2670.551703][T27457] ? ext4_dax_writepages+0xb30/0xb30 [ 2670.557046][T27457] ? mpage_map_and_submit_buffers+0xab0/0xab0 [ 2670.563173][T27457] ? __filemap_get_folio+0x1e7/0x990 [ 2670.568515][T27457] ext4_da_write_begin+0x40a/0x8c0 [ 2670.573703][T27457] ? ext4_write_begin+0x1100/0x1100 [ 2670.578969][T27457] generic_perform_write+0x278/0x600 [ 2670.584325][T27457] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2670.589761][T27457] ? 
iunique+0x380/0x380 [ 2670.594072][T27457] ext4_buffered_write_iter+0x11f/0x3c0 [ 2670.599672][T27457] ext4_file_write_iter+0x7ee/0x1950 [ 2670.605014][T27457] ? lock_sync+0x190/0x190 [ 2670.609482][T27457] ? ext4_file_splice_read+0x150/0x150 [ 2670.615004][T27457] vfs_write+0x650/0xe40 [ 2670.619350][T27457] ? kernel_write+0x6c0/0x6c0 [ 2670.624094][T27457] ? __fget_files+0x279/0x410 [ 2670.628849][T27457] ksys_write+0x12f/0x250 [ 2670.633241][T27457] ? __ia32_sys_read+0xb0/0xb0 [ 2670.638068][T27457] ? syscall_enter_from_user_mode+0x26/0x80 [ 2670.644022][T27457] do_syscall_64+0x38/0xb0 [ 2670.648485][T27457] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2670.654433][T27457] RIP: 0033:0x7f8cd127cae9 [ 2670.658884][T27457] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2670.678541][T27457] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2670.686996][T27457] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2670.695002][T27457] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2670.703034][T27457] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2670.711042][T27457] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2670.719052][T27457] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2670.727090][T27457] 19:47:44 executing program 3: r0 = socket$inet6(0xa, 0x6, 0x0) listen(r0, 0x0) (async) setsockopt(r0, 0x8000, 0x1, &(0x7f0000000540)="ce3100e83e1721513d744172ecb4749c6ddb32ad4aa73db22f719bc0b382176029ad012ee1cd90a9cc54e0766746b9ef9c56810d2378c1200825f7ad954a6378d79bdeb6a273bab56d4f1c89ab25f39aaa66ea090254c0d72bb6fcfefebc54ac3651328a1cc08b642c6bd405f6d1a43c12a34263a4568ee158b49a821ceb58af353b88b1dc059ec3ca09d4fc614773e936a809c2467a5cb28234f2941623e33fc7d0acbcd42368f184dc85b0f55ab81a2c1fe32ea0d94093e6b401e2ce8d7950d6101fc4da25b2342c37c50aace7f9fd86fab4ede257b02c30cbf7060e1d3c48b2e2f4756712e8839a5b92fd61546eb14cf98b0701619024b7cc100bc3c5e2b2d9333697de12308a21fd75ecb97d5d6697782077837f4475198e7dd2e4b173f4288d8dc284480f12c18be24b983b440774df3464f4b352934163f4ec3a78e252397d8c40f700e68090b6de8fba5f9954d626257b52256e598ccc9c0aa9f7d397d0961e43bc2109448223bf09162c2790e059b65ba7027a7cb0cc34849a90accc46411d56f5d24c8f9fd118a675efbdd8a3d7118e6cd747678072747e5861e7afa9122ca6a61908a925359a9bf022fbf65a6344748208e096c5cb9e1cc192b9e0d2b9cc4f76a1f1ab0dccd1b35252635b81a4e0c71de9b4f6fa66248cef191eb9521622ec0af64c70352d59bf8f8b5dc80c6764eb0c696645ed6641335c606a8be80595fca1cafeaaafc80d5d30a1f6efe589cae1a4f9bb1ed9803fcc1c4582e0776723569508f188c208faeceb670f808e9ffc8893effd3cb4b599c1c4976fdb7884dd285a4e15d30a10a19411f9d1b0940a127dbb7bf5b91879d14bec0a686885d60e63937377accdcd3e88bf1e944073034369ebf9d439b61de3738c2e8c503ee288972938ab47d091439bcb53804922c74e41cfb7f254b5967eaef1d8689474818db302268db4b69cfc20a34bfc9115941c2efa66f0345f2234985845cf31478ded60340e6c0d0199d094ed66cfcc83151d70a505834a5fa74fee9eab0298970a56bdccbcac7effce6286d3620254625bd9696666a9cfa428c26fe273cb2498b923380e745aea217216378d157b97abcb3435d5e15ee55eb1abdcebaaf2cc00054ea506562de7569e8aebb3154209db2c5405ae45352154b90e090a27b0c1638215649a7b72979747b2fa163248bbd4048bdf89eb72cda01b1c6559f4ababe3cba2f190ec0516b68d668ac94a7100e0a64e91e2d8e9913a4a9f233a3313bcce00c6c8b2fb0c96c599697caed036003072467d4b7ccbecaac47d7d14ada5b9c7ae074641b638830b9326380d8b2d3eba99fde0d97180198053a7a61877e2cd2734d7d9
5004ab73571f2c4bc47fd5af8afde89462af8a1cda361d6a259a32150da06c1343f295221b784dc4dd5c83a570a898c29513232df1744cbae8ba12ac678e027f826f10cf053fc3b391de692587dc2da640ef0b1fb5bf8fd8c947f54e51781aaad7c78d6bb13064fe2e4e067471e1cc6c8ebb4c274d7b2b7904d2cf9520fd185ffcfab56b4fad799e9467efbc29c76e21cd01c060fed97a0f5b80be30c16a002caa88962196ae794c0650e49135aeac46ea4b0ade97f65a82cf4b2be86704fde39ad460ab9935378b8a4ab3a15bca8859b8b037c04b5e5a2e131dabae850e9ba1cb6150d5fd4c588f76e65d3ac3d92cc3c1366f51fddddf134375f3b215c9fd9154501d4de7bfdb66c5acb856d06b9ec3d3243e61913a92b87a51d01491129511cd0ef65aad1dd2e8bda5e454dc5ce23bca36474ab999273e9c46f0f827fd5c9ae923c8dacaee95f006e44c284083bdfde673a36ff239d71bf8610fb0b3753d61b4acfd0db986547d3e981479636ee9c0b05a11ed12011e03a931e79d7ec89e99283b394cf6af01ffdc072631f0f9667fc6a5ca5a93121d5c115f8c88754cff7d9627ebe1d215100824d61869fcf59fd60e086df5d2418a64f6f81de4978453b39c1832607c21e10e9812550615bc09ab29d0f6b565a73e41b23a0b1624702e23d42a48cf07fd5871605bd980b310ed258d3d2ff0f119b1b2832191410665cbbcafc64d26de6368a85eb475d459635ae0693de2589887e21699d8688656963351bedfbb04fbfbb64089b0510087d88fe18d98faa11e2b9ae696635eab03e4a223f103ba52dde0d234c9ca84ab9225de350b693c492d7194ad68e9b1735c185d0d0291e6fb8f1e859c9bce109aae9b0aa0103fc9d743d6f07a1871ae211acf812cb2457d2c665fc5fc5f2dd65745168a34ad4c692bc0436ae9b9efb48ec0f379e9efd675cf9957c6af3a7a851464a2f3406ca147a1a162eeda501a2469e117a0769e9c731f25373fb9b3fc6c8ec564f0e26ee8407fdba89be6e067a875042e1b06a6de3b4563df45d3a9a2f734de9291fc24958b46a9820cb3f21e8b9aeab33d97ae235fc59e6a65b80367b3c7efb2abb507baa5e41c39e637f4899dddd85647f5b5cae97b8dd0126d604d7ac451dff0eb01f6a006625629ca5e355fd70f227a5b42de6ef8ee77ba10ccf39e22852fa7645a6054b72fbdf415aabd7c9a7490b6328629b7240909d85959203f82defbdb303240f3773521d7ffed6be3a1ad43add7ae079976f415bfae1ffcc68b7efc0b4c62f99036c68d9e62425025fe55a1b1745776263319563470cd0617f5127df0605c934667097173be10492b3889a204a7ec6aae0027f6e02c72649388d7cad750d1af898520443be8ea9b99eb5664fd191ea835362fdda778ee0799986f6aade1ac7038cf58803b5733352b6c81dd0f282310765d5cb98b318a0a88888f8a8a4c9f1c18320729cf5cb1204c4a4b436648ce3a1b2206fe477f4a4990b299e20270c5ab7f4f23cc11c5a86c649e347f03c41649c5a804bbc78a2537b612e8fc48079872ce5f129f2a3e325e34a48850eb572ba886331b986552ca2091d6fecf701f6f688150abec1f7231c2a79518114c990fed44ee72ec6161b03f6c6a7a3d253b6fa9a8b8a28ffc55117ea8c96f2120fc4f1c4c6e1798c1682cc7fcb82333d7bdde94bf54a4ae039c597f79740d6f8bf2f2428ed5858e0d0e8d20e4d9d4934215e7b541f2d71870f89e2186ae5759f78a964edbefe5e6409b16c1f2df02e198ab6587d4f57999c66167a295598ce8549cb72140d2261e2f75d1b94a900f751d7cc96ab118c04a93dc4cae74699f2f0128e8123c3e6a46e282591eae11ae3c26d4310eb9f6caa2e427e6f2bf94e0b5e8a84f01f0e22e69a0bb679fa04231c67c285aa1906e2b106eb9a8b1d2a1c62593f62bf7376dc978582da6e62524573306f312d08ca36c01d059db68fe6a2c45c525e959e40f20be5980bfac86f87509fd4d90aff4a7459cb7c0960f167b8df4986719dea196305ca0c849b0a02ef917857a5da4d47c2b0bae6c26a9627148da655ee696b8c95e7c075dbd9801586fe99fbe33e5bac4f65d8dfb7a70c2911d816af01c802c360897c690f564ac05aa62a559842685a27c834883ec3bbf4881b356ef218cbaee4cad0994677e97a7d67c49b030fe8949439add40bffdf2d332141d0ae6786bd90fa0770641e0374c337780d2d871975c82a52fcc1df3090576ac56457d6644e9d4711ad22e808b2eff8e96951988d0496369c0d3f1920b222041ee9f38d1ffec28b742f9c33039612c51723312442a3e9698d4094df3307168bd7cd42e2df293a1b3801aa36b1a974c2fb68bd069a907db328301ec234bbb7879ce13975557f8620c26dcf74d22fe77d5fc30dae73db3365a9bd3267e54287fe56fb123e4d7ef14905d798cb77f6041b8f9d5c767e764b57eee391f642e1a6abc83aeab77d42db058be3d3ae6db3dce42bdda99c
538cec504edd80f9e2e1b26cb5ff576d1949c3a3dec754be8f3e37b59703484e1e15f9c504a6313e0f2a517626f3bf3bed30fd851a86a60a3d7c667742904c0286a42f17f991527c88e8ea52a011621b4617d43c340458d10d20be60e57bbcd515cbe829ad28275c4d91717532b4eb65a5af76793df928d3b231b973ebdb80aeae9bf36bb938ea8379ad8912d7a5b454e829485781b2d27fc05dfae8f92ade2e823bc3686db670d63fffb4c35d7058cc14cd7707cedeba18d657c81f4a128b527c48309d1c5cf8ec9eee9240bcb27281154072b8e8842b94ca2ac5c85d88181af8744f0e4ea38e18a22aca606dee32fac7607220ffeaee5a65ca4236709135ea01f1aca6718554d5162683166aad6b83b5c692b66cd3688afcc2733ac45bc9a4b4dbfa7e9aa121ead5a3cbde1b857ca3caa40dca3ad07fb1539628eb620a221ae82ca6c654c23db2334f448d1644684ec5e73d9b7108e7ad4cd39f14b84863da656dd018b472d814d5854bf3cfe79488740a86c8c2f39010d6cb09461035b4d3fa39e9144d39e8f7f0e552fb448a43075c3f860adc2d56edf83ce4f120052cdddab0539b444392030017b8b04d40f1327d1f72e515909e79992fe8ae3caf18d5d0bb041304fde7eacaf85bd7093daf9b413ec3a4bca8900c82b1a3a973fde6b5350eba00098d635eabd0b69d95a07ae744329f771ff8ea4fc226edb5c91af4dd8b3523aa510ead13161d41df17ba169b6988126f04410a27050da3d2d9784bef5ef93c4c739f4b4ddd84c0ca25d68a0713a6250056336ca9e0d9bf5b7b680beb1a05fbecadccd6b2a2fa31e1265cc1af5db33222aa38709b760033705acfe0cbdbb2477c68f1f2ca8eff3045141fb57d143911bed0192157530d4ad3071825a2ae7dcf8b6ef431af4b28312039cf719204e7bef3aaf3442664575c959b808cc9bb7b478f9c3ea9dfc6e29f9512922062e5aeec97af9d249f33ce88ae3bfa16170e6549200d3cecec0b0a952b532814be854724321c2dd1aa1c847c37b289ec4e3744aa8cdda72c19aa0bd48b5f2842ceee7d563bb2ebfd33ac96c9afb863defd23179afd2d80cf1a3f86fa4b3611e7199af23ce2890b27736ade2c8bf1a30c82d39a253ecbfd2da905974e60855c09613fd1b224c09f240578935d1e523df1828b62b463b3e45c1ed4ab3a1d39ae52c35b9c36ebd79c273c8ce530a2825c36c390fa59b3b7ff1d86c1f9269e2db643a6926e68a44fd18cff9d0239f4a0ddb46a591eb7ac5001f3f70b1eeb566873cbf056ac380a6beeb585356413933a6bda59beabbe13c5d8b57775b8d472e9f3df52b0e916c88883dbe6b3560747ed866c85c0b5b7a10a77a20a247fb25895c0829a21853e0c23ad4db3a71450b56107fc1ef86e6b616c0525c29f6f7fc0c20591388dbd4f86ba348524569f0d04807bf62d8f3218aa8f54ff0d4b654706e0b9a64c1ca5fca08dcc0a583a282536fa5ef7b7c49f1541e070f5f27bc6b535c9913e5dc1a73ee1d75c09b05ab930b25e6c013e7fe9d1bba233fdcfe62f20f2cecd1535c51ad9c46c9e241e35b26103383e7bddf88f4697fa84af3d59cec4586c98f7a05bb3696df4b226129465cbf01679a5a29fceaadc338b1f5a845dcc5fdb3ae4d8107e1f0defccbbd4d0ba6a56be646ee6065288e5d73291588f775539fffc0b381cf0de585c36742b87a9392f32dacfbb0508657fca7b4e6ae5e39b1b6792f370cee27497ba2bdef5076f52c970cb732107e1009cf0655c88ebd8349a74d2aa00f6b7621654a02b8a19ec050b515edf4cb3d54cdbb554045c06e6242f96a0eedbdc7bd9c847a3f422cbd403830106d0a4b155d609ed7aa81562df53885f0f0d8d0ee8d1217fb21b0276f80e77bfa5ef65926c341c890375b3bc3f3", 0x1000) r1 = bpf$MAP_CREATE(0x0, &(0x7f00000004c0)=@bloom_filter={0x1e, 0x0, 0x8, 0x4, 0x0, 0x1}, 0x48) ppoll(&(0x7f00000000c0)=[{r0}, {r1}], 0x2, 0x0, 0x0, 0x0) 19:47:44 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:44 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 5) [ 2670.908774][T27455] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:45 executing program 3: r0 = socket$inet6(0xa, 0x2, 0x0) r1 = socket(0x10, 0x3, 0x0) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffff0500000000", @ANYRES32=r4, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r5, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) connect$inet6(r5, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r5, 0x6, 0x1f, &(0x7f0000000000), 0x4) setsockopt$inet6_tcp_TLS_TX(r5, 0x11a, 0x2, &(0x7f0000000180)=@gcm_128={{0x303, 0x38}, "00000100", "6a1d45a8ef8ee704328c671d000000f6", "94a92000", "920b00652000"}, 0x28) sendto$inet6(r5, &(0x7f00000002c0)="31f52096bd9d5914f4b6b22787465ca98b5874f329ac77e9e1f11f05753e50", 0x1f, 0x20000004, &(0x7f0000000300)={0xa, 0x4e23, 0x4, @loopback, 0x2}, 0x1c) sendmsg$nl_route_sched(r1, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000340)=@newqdisc={0x50, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r4, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_htb={{0x8}, {0x24, 0x2, [@TCA_HTB_INIT={0x18}, @TCA_HTB_DIRECT_QLEN={0x8}]}}]}, 0x50}}, 0x0) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$TIPC_NL_LINK_RESET_STATS(r6, &(0x7f0000000240)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000200)={&(0x7f00000003c0)={0x90, 0x0, 0x400, 0x70bd29, 0x25dfdbfe, {}, [@TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x60e}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0xffffffff}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x5}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x6}]}, @TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x2}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x7d7}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x9}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x1f}]}, @TIPC_NLA_MON={0x34, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_REF={0x8, 0x2, 0xff}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x400}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x9}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x9}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x7fffffff}]}]}, 0x90}, 0x1, 0x0, 0x0, 0x28000}, 0x20000000) sendmmsg$inet(r0, &(0x7f0000000280)=[{{&(0x7f00000001c0)={0x2, 0x4e21, @multicast1=0xe0000300}, 0x10, 0x0, 0x0, &(0x7f0000000000)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r4, @empty}}}], 0x20}}], 0x1, 0x0) 19:47:45 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2671.051774][T27455] bond1248: entered promiscuous mode [ 2671.057971][T27455] 8021q: adding VLAN 0 to HW filter on device bond1248 [ 2671.061265][T27481] FAULT_INJECTION: forcing a failure. 
[ 2671.061265][T27481] name failslab, interval 1, probability 0, space 0, times 0 [ 2671.080953][T27481] CPU: 0 PID: 27481 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2671.091457][T27481] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 19:47:45 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2671.101573][T27481] Call Trace: [ 2671.104902][T27481] [ 2671.107883][T27481] dump_stack_lvl+0x125/0x1b0 [ 2671.112649][T27481] should_fail_ex+0x496/0x5b0 [ 2671.117414][T27481] ? __es_tree_search.isra.0+0x210/0x210 [ 2671.123126][T27481] should_failslab+0x9/0x20 [ 2671.127709][T27481] kmem_cache_alloc+0x69/0x3b0 [ 2671.132570][T27481] __es_insert_extent+0x741/0x1470 [ 2671.137781][T27481] ext4_es_insert_delayed_block+0x2ac/0x610 [ 2671.143775][T27481] ? ext4_is_pending+0x200/0x200 [ 2671.148789][T27481] ? percpu_counter_add_batch+0x132/0x1f0 [ 2671.154616][T27481] ? do_raw_spin_unlock+0x173/0x230 [ 2671.159909][T27481] ? _raw_spin_unlock+0x28/0x40 [ 2671.164851][T27481] ext4_da_get_block_prep+0x850/0x1340 [ 2671.170410][T27481] ? ext4_dax_writepages+0xb30/0xb30 [ 2671.175797][T27481] ? ext4_block_write_begin+0xc08/0xe30 [ 2671.181437][T27481] ? reacquire_held_locks+0x4b0/0x4b0 [ 2671.186911][T27481] ? folio_flags.constprop.0+0x56/0x150 [ 2671.192565][T27481] ext4_block_write_begin+0x3da/0xe30 [ 2671.198029][T27481] ? ext4_dax_writepages+0xb30/0xb30 [ 2671.203396][T27481] ? mpage_map_and_submit_buffers+0xab0/0xab0 [ 2671.209558][T27481] ? __filemap_get_folio+0x1e7/0x990 [ 2671.214924][T27481] ext4_da_write_begin+0x40a/0x8c0 [ 2671.220146][T27481] ? ext4_write_begin+0x1100/0x1100 [ 2671.225473][T27481] generic_perform_write+0x278/0x600 [ 2671.230878][T27481] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2671.236349][T27481] ? iunique+0x380/0x380 [ 2671.240675][T27481] ext4_buffered_write_iter+0x11f/0x3c0 [ 2671.246300][T27481] ext4_file_write_iter+0x7ee/0x1950 [ 2671.251647][T27481] ? lock_sync+0x190/0x190 [ 2671.256106][T27481] ? ext4_file_splice_read+0x150/0x150 [ 2671.261618][T27481] vfs_write+0x650/0xe40 [ 2671.265911][T27481] ? kernel_write+0x6c0/0x6c0 [ 2671.270640][T27481] ? __fget_files+0x279/0x410 [ 2671.275373][T27481] ksys_write+0x12f/0x250 [ 2671.279749][T27481] ? __ia32_sys_read+0xb0/0xb0 [ 2671.284569][T27481] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2671.290531][T27481] do_syscall_64+0x38/0xb0 [ 2671.294995][T27481] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2671.300933][T27481] RIP: 0033:0x7f8cd127cae9 [ 2671.305379][T27481] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2671.325034][T27481] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2671.333491][T27481] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2671.341526][T27481] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2671.349540][T27481] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2671.357540][T27481] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2671.365556][T27481] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2671.373596][T27481] 19:47:45 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000380), 0x101bf) [ 2671.507823][T27462] bond1248: (slave bridge1177): making interface the new active one [ 2671.532183][T27462] bridge1177: entered promiscuous mode [ 2671.567953][T27462] bond1248: (slave bridge1177): Enslaving as an active interface with an up link [ 2671.606592][T27466] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:45 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x77e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:45 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, 0x0, 0x0) [ 2671.660751][T27466] workqueue: Failed to create a rescuer kthread for wq "bond1207": -EINTR [ 2671.734897][T27469] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2671.780143][T27469] workqueue: Failed to create a rescuer kthread for wq "bond1133": -EINTR [ 2672.098975][T27480] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. 
19:47:46 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xec8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:46 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 6) 19:47:46 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, 0x0, 0x0) 19:47:46 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x71150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:46 executing program 3: r0 = socket$inet6(0xa, 0x2, 0x0) (async) r1 = socket(0x10, 0x3, 0x0) (async) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffff0500000000", @ANYRES32=r4, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r5, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) (async) connect$inet6(r5, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) (async) setsockopt$inet6_tcp_TCP_ULP(r5, 0x6, 0x1f, &(0x7f0000000000), 0x4) setsockopt$inet6_tcp_TLS_TX(r5, 0x11a, 0x2, &(0x7f0000000180)=@gcm_128={{0x303, 0x38}, "00000100", "6a1d45a8ef8ee704328c671d000000f6", "94a92000", "920b00652000"}, 0x28) (async) sendto$inet6(r5, &(0x7f00000002c0)="31f52096bd9d5914f4b6b22787465ca98b5874f329ac77e9e1f11f05753e50", 0x1f, 0x20000004, &(0x7f0000000300)={0xa, 0x4e23, 0x4, @loopback, 0x2}, 0x1c) (async) sendmsg$nl_route_sched(r1, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000340)=@newqdisc={0x50, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r4, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_htb={{0x8}, {0x24, 0x2, 
[@TCA_HTB_INIT={0x18}, @TCA_HTB_DIRECT_QLEN={0x8}]}}]}, 0x50}}, 0x0) (async) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$TIPC_NL_LINK_RESET_STATS(r6, &(0x7f0000000240)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000200)={&(0x7f00000003c0)={0x90, 0x0, 0x400, 0x70bd29, 0x25dfdbfe, {}, [@TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x60e}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0xffffffff}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x5}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x6}]}, @TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x2}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x7d7}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x9}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x1f}]}, @TIPC_NLA_MON={0x34, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_REF={0x8, 0x2, 0xff}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x400}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x9}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x9}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x7fffffff}]}]}, 0x90}, 0x1, 0x0, 0x0, 0x28000}, 0x20000000) (async) sendmmsg$inet(r0, &(0x7f0000000280)=[{{&(0x7f00000001c0)={0x2, 0x4e21, @multicast1=0xe0000300}, 0x10, 0x0, 0x0, &(0x7f0000000000)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r4, @empty}}}], 0x20}}], 0x1, 0x0) [ 2672.196681][T27496] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2672.269184][T27505] FAULT_INJECTION: forcing a failure. [ 2672.269184][T27505] name fail_usercopy, interval 1, probability 0, space 0, times 0 [ 2672.282895][T27505] CPU: 1 PID: 27505 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2672.293388][T27505] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2672.303502][T27505] Call Trace: [ 2672.306829][T27505] [ 2672.309814][T27505] dump_stack_lvl+0x125/0x1b0 [ 2672.314571][T27505] should_fail_ex+0x496/0x5b0 [ 2672.319336][T27505] copyin+0x1b/0xa0 [ 2672.323238][T27505] copy_page_from_iter_atomic+0x257/0x13e0 [ 2672.329132][T27505] ? ext4_da_write_begin+0x228/0x8c0 [ 2672.334517][T27505] ? csum_and_copy_to_iter+0x1430/0x1430 [ 2672.340230][T27505] ? ext4_write_begin+0x1100/0x1100 [ 2672.345797][T27505] generic_perform_write+0x2e8/0x600 [ 2672.351190][T27505] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2672.356662][T27505] ? iunique+0x380/0x380 [ 2672.360998][T27505] ext4_buffered_write_iter+0x11f/0x3c0 [ 2672.366630][T27505] ext4_file_write_iter+0x7ee/0x1950 [ 2672.372007][T27505] ? lock_sync+0x190/0x190 [ 2672.376509][T27505] ? ext4_file_splice_read+0x150/0x150 [ 2672.382062][T27505] vfs_write+0x650/0xe40 [ 2672.386405][T27505] ? kernel_write+0x6c0/0x6c0 [ 2672.391190][T27505] ? __fget_files+0x279/0x410 [ 2672.395980][T27505] ksys_write+0x12f/0x250 [ 2672.400412][T27505] ? __ia32_sys_read+0xb0/0xb0 [ 2672.405282][T27505] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2672.411306][T27505] do_syscall_64+0x38/0xb0 [ 2672.415812][T27505] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2672.421799][T27505] RIP: 0033:0x7f8cd127cae9 [ 2672.426272][T27505] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2672.446120][T27505] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2672.454607][T27505] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 19:47:46 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, 0x0, 0x0) [ 2672.462639][T27505] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2672.470693][T27505] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2672.478725][T27505] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2672.486758][T27505] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2672.494811][T27505] 19:47:46 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 1) 19:47:46 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 7) [ 2672.758617][T27496] bond1249: entered promiscuous mode [ 2672.780803][T27522] FAULT_INJECTION: forcing a failure. [ 2672.780803][T27522] name fail_page_alloc, interval 1, probability 0, space 0, times 0 [ 2672.797225][T27496] 8021q: adding VLAN 0 to HW filter on device bond1249 [ 2672.810939][T27522] CPU: 1 PID: 27522 Comm: syz-executor.0 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2672.821458][T27522] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2672.831663][T27522] Call Trace: [ 2672.834992][T27522] [ 2672.837978][T27522] dump_stack_lvl+0x125/0x1b0 [ 2672.842736][T27522] should_fail_ex+0x496/0x5b0 [ 2672.847509][T27522] __should_fail_alloc_page+0xe7/0x130 [ 2672.853061][T27522] prepare_alloc_pages.constprop.0+0x16f/0x550 [ 2672.859314][T27522] __alloc_pages+0x14e/0x4a0 [ 2672.864006][T27522] ? __alloc_pages_slowpath.constprop.0+0x2360/0x2360 [ 2672.870888][T27522] ? xas_start+0x14f/0x780 [ 2672.875425][T27522] alloc_pages+0x1a9/0x270 [ 2672.879926][T27522] folio_alloc+0x1e/0x60 [ 2672.884252][T27522] filemap_alloc_folio+0x3bb/0x490 [ 2672.889460][T27522] ? folio_wake_bit+0x270/0x270 [ 2672.894421][T27522] ? asm_exc_page_fault+0x26/0x30 [ 2672.899529][T27522] ? lockdep_hardirqs_on+0x7d/0x100 [ 2672.904825][T27522] __filemap_get_folio+0x288/0x990 [ 2672.910015][T27522] ext4_da_write_begin+0x3c7/0x8c0 [ 2672.915222][T27522] ? ext4_write_begin+0x1100/0x1100 [ 2672.920524][T27522] generic_perform_write+0x278/0x600 [ 2672.925948][T27522] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2672.931428][T27522] ? iunique+0x380/0x380 [ 2672.935760][T27522] ext4_buffered_write_iter+0x11f/0x3c0 [ 2672.941389][T27522] ext4_file_write_iter+0x7ee/0x1950 [ 2672.947219][T27522] ? lock_sync+0x190/0x190 [ 2672.951717][T27522] ? ext4_file_splice_read+0x150/0x150 [ 2672.957266][T27522] vfs_write+0x650/0xe40 [ 2672.961599][T27522] ? kernel_write+0x6c0/0x6c0 [ 2672.966366][T27522] ? 
__fget_files+0x279/0x410 [ 2672.971145][T27522] ksys_write+0x12f/0x250 [ 2672.975556][T27522] ? __ia32_sys_read+0xb0/0xb0 [ 2672.980408][T27522] ? syscall_enter_from_user_mode+0x26/0x80 [ 2672.986392][T27522] do_syscall_64+0x38/0xb0 [ 2672.990894][T27522] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2672.996913][T27522] RIP: 0033:0x7fc98cc7cae9 [ 2673.001384][T27522] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2673.023058][T27522] RSP: 002b:00007fc98da960c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2673.031545][T27522] RAX: ffffffffffffffda RBX: 00007fc98cd9bf80 RCX: 00007fc98cc7cae9 [ 2673.039576][T27522] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2673.047613][T27522] RBP: 00007fc98da96120 R08: 0000000000000000 R09: 0000000000000000 [ 2673.055645][T27522] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2673.063673][T27522] R13: 000000000000000b R14: 00007fc98cd9bf80 R15: 00007ffd4dc89db8 [ 2673.071732][T27522] 19:47:47 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 2) [ 2673.130497][T27527] FAULT_INJECTION: forcing a failure. [ 2673.130497][T27527] name failslab, interval 1, probability 0, space 0, times 0 [ 2673.161840][T27527] CPU: 1 PID: 27527 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2673.172366][T27527] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2673.182491][T27527] Call Trace: [ 2673.185911][T27527] [ 2673.188890][T27527] dump_stack_lvl+0x125/0x1b0 [ 2673.193634][T27527] should_fail_ex+0x496/0x5b0 [ 2673.198374][T27527] should_failslab+0x9/0x20 [ 2673.202937][T27527] kmem_cache_alloc+0x33a/0x3b0 [ 2673.207841][T27527] ? reacquire_held_locks+0x4b0/0x4b0 [ 2673.213271][T27527] ? preempt_count_sub+0x150/0x150 [ 2673.218448][T27527] jbd2__journal_start+0x190/0x690 [ 2673.223630][T27527] __ext4_journal_start_sb+0x40f/0x5c0 [ 2673.229151][T27527] ? ext4_dirty_inode+0xa1/0x130 [ 2673.234146][T27527] ? ext4_setattr+0x29e0/0x29e0 [ 2673.239056][T27527] ext4_dirty_inode+0xa1/0x130 [ 2673.243881][T27527] ? rcu_is_watching+0x12/0xb0 [ 2673.248712][T27527] __mark_inode_dirty+0x1e0/0xd50 [ 2673.253792][T27527] ? folio_flags.constprop.0+0x56/0x150 [ 2673.259410][T27527] generic_write_end+0x35f/0x440 [ 2673.264402][T27527] ext4_da_write_end+0x1fe/0xb40 [ 2673.269423][T27527] generic_perform_write+0x32e/0x600 [ 2673.274785][T27527] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2673.280219][T27527] ? iunique+0x380/0x380 [ 2673.284522][T27527] ext4_buffered_write_iter+0x11f/0x3c0 [ 2673.290127][T27527] ext4_file_write_iter+0x7ee/0x1950 [ 2673.295472][T27527] ? lock_sync+0x190/0x190 [ 2673.299938][T27527] ? ext4_file_splice_read+0x150/0x150 [ 2673.305456][T27527] vfs_write+0x650/0xe40 [ 2673.309757][T27527] ? kernel_write+0x6c0/0x6c0 [ 2673.314499][T27527] ? __fget_files+0x279/0x410 [ 2673.319246][T27527] ksys_write+0x12f/0x250 [ 2673.323632][T27527] ? __ia32_sys_read+0xb0/0xb0 [ 2673.328453][T27527] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2673.334416][T27527] do_syscall_64+0x38/0xb0 [ 2673.338879][T27527] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2673.344821][T27527] RIP: 0033:0x7f8cd127cae9 [ 2673.349281][T27527] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2673.369053][T27527] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2673.377511][T27527] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2673.385515][T27527] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2673.393525][T27527] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2673.401529][T27527] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2673.409533][T27527] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2673.417558][T27527] [ 2673.459833][T27499] bond1249: (slave bridge1178): making interface the new active one [ 2673.477046][T27529] FAULT_INJECTION: forcing a failure. [ 2673.477046][T27529] name fail_page_alloc, interval 1, probability 0, space 0, times 0 [ 2673.484551][T27499] bridge1178: entered promiscuous mode [ 2673.492486][T27529] CPU: 0 PID: 27529 Comm: syz-executor.0 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2673.506309][T27529] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2673.516422][T27529] Call Trace: [ 2673.519750][T27529] [ 2673.522729][T27529] dump_stack_lvl+0x125/0x1b0 [ 2673.527494][T27529] should_fail_ex+0x496/0x5b0 [ 2673.529815][T27499] bond1249: (slave bridge1178): Enslaving as an active interface with an up link [ 2673.532229][T27529] __should_fail_alloc_page+0xe7/0x130 [ 2673.532281][T27529] prepare_alloc_pages.constprop.0+0x16f/0x550 [ 2673.553176][T27529] __alloc_pages+0x14e/0x4a0 [ 2673.557850][T27529] ? __alloc_pages_slowpath.constprop.0+0x2360/0x2360 [ 2673.564704][T27529] ? xas_start+0x14f/0x780 [ 2673.569181][T27529] alloc_pages+0x1a9/0x270 [ 2673.573668][T27529] folio_alloc+0x1e/0x60 [ 2673.577989][T27529] filemap_alloc_folio+0x3bb/0x490 [ 2673.583197][T27529] ? folio_wake_bit+0x270/0x270 [ 2673.588133][T27529] ? asm_exc_page_fault+0x26/0x30 [ 2673.593223][T27529] ? lockdep_hardirqs_on+0x7d/0x100 [ 2673.598504][T27529] __filemap_get_folio+0x288/0x990 [ 2673.603679][T27529] ext4_da_write_begin+0x3c7/0x8c0 [ 2673.608875][T27529] ? ext4_write_begin+0x1100/0x1100 [ 2673.614187][T27529] generic_perform_write+0x278/0x600 [ 2673.619575][T27529] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2673.625040][T27529] ? iunique+0x380/0x380 [ 2673.629374][T27529] ext4_buffered_write_iter+0x11f/0x3c0 [ 2673.635001][T27529] ext4_file_write_iter+0x7ee/0x1950 [ 2673.640373][T27529] ? lock_acquire+0x1ef/0x510 [ 2673.645136][T27529] ? lock_sync+0x190/0x190 [ 2673.649637][T27529] ? ext4_file_splice_read+0x150/0x150 [ 2673.655185][T27529] vfs_write+0x650/0xe40 [ 2673.659522][T27529] ? kernel_write+0x6c0/0x6c0 [ 2673.664290][T27529] ? __fget_files+0x279/0x410 [ 2673.669083][T27529] ksys_write+0x12f/0x250 [ 2673.673503][T27529] ? __ia32_sys_read+0xb0/0xb0 [ 2673.678367][T27529] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2673.684366][T27529] do_syscall_64+0x38/0xb0 [ 2673.688859][T27529] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2673.694827][T27529] RIP: 0033:0x7fc98cc7cae9 [ 2673.699309][T27529] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2673.718983][T27529] RSP: 002b:00007fc98da960c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2673.727456][T27529] RAX: ffffffffffffffda RBX: 00007fc98cd9bf80 RCX: 00007fc98cc7cae9 [ 2673.735490][T27529] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2673.743516][T27529] RBP: 00007fc98da96120 R08: 0000000000000000 R09: 0000000000000000 [ 2673.751544][T27529] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2673.759569][T27529] R13: 000000000000000b R14: 00007fc98cd9bf80 R15: 00007ffd4dc89db8 [ 2673.767622][T27529] 19:47:47 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x78e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2673.818660][T27504] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:47 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 3) 19:47:48 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 8) [ 2673.874689][T27504] workqueue: Failed to create a rescuer kthread for wq "bond1207": -EINTR [ 2674.038691][T27536] FAULT_INJECTION: forcing a failure. [ 2674.038691][T27536] name failslab, interval 1, probability 0, space 0, times 0 [ 2674.074167][T27536] CPU: 0 PID: 27536 Comm: syz-executor.0 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2674.076938][T27538] FAULT_INJECTION: forcing a failure. [ 2674.076938][T27538] name fail_page_alloc, interval 1, probability 0, space 0, times 0 [ 2674.084648][T27536] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2674.108061][T27536] Call Trace: [ 2674.111386][T27536] [ 2674.114360][T27536] dump_stack_lvl+0x125/0x1b0 [ 2674.119129][T27536] should_fail_ex+0x496/0x5b0 [ 2674.123891][T27536] should_failslab+0x9/0x20 [ 2674.128497][T27536] __kmem_cache_alloc_node+0x2fd/0x350 [ 2674.134037][T27536] ? ext4_find_extent+0x958/0xce0 [ 2674.139134][T27536] ? print_usage_bug.part.0+0x670/0x670 [ 2674.144763][T27536] ? ext4_find_extent+0x958/0xce0 [ 2674.149855][T27536] __kmalloc+0x4c/0x100 [ 2674.154075][T27536] ext4_find_extent+0x958/0xce0 [ 2674.158988][T27536] ext4_ext_map_blocks+0x26b/0x5b00 [ 2674.164257][T27536] ? 
asm_sysvec_apic_timer_interrupt+0x1a/0x20 [ 2674.170483][T27536] ? lockdep_hardirqs_on+0x7d/0x100 [ 2674.175757][T27536] ? asm_sysvec_apic_timer_interrupt+0x1a/0x20 [ 2674.182081][T27536] ? ext4_ext_release+0x10/0x10 [ 2674.187092][T27536] ? lock_sync+0x190/0x190 [ 2674.191566][T27536] ? reacquire_held_locks+0x4b0/0x4b0 [ 2674.196984][T27536] ? preempt_count_sub+0x150/0x150 [ 2674.202156][T27536] ? ext4_es_lookup_extent+0xc7/0xbf0 [ 2674.207576][T27536] ext4_da_get_block_prep+0xd80/0x1340 [ 2674.213089][T27536] ? ext4_dax_writepages+0xb30/0xb30 [ 2674.218422][T27536] ? ext4_block_write_begin+0xc08/0xe30 [ 2674.224013][T27536] ? reacquire_held_locks+0x4b0/0x4b0 [ 2674.229436][T27536] ? folio_flags.constprop.0+0x56/0x150 [ 2674.235044][T27536] ext4_block_write_begin+0x3da/0xe30 [ 2674.240468][T27536] ? ext4_dax_writepages+0xb30/0xb30 [ 2674.245799][T27536] ? mpage_map_and_submit_buffers+0xab0/0xab0 [ 2674.251912][T27536] ? __filemap_get_folio+0x1e7/0x990 [ 2674.259150][T27536] ext4_da_write_begin+0x40a/0x8c0 [ 2674.264324][T27536] ? ext4_write_begin+0x1100/0x1100 [ 2674.269582][T27536] generic_perform_write+0x278/0x600 [ 2674.274929][T27536] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2674.280349][T27536] ? iunique+0x380/0x380 [ 2674.284645][T27536] ext4_buffered_write_iter+0x11f/0x3c0 [ 2674.290244][T27536] ext4_file_write_iter+0x7ee/0x1950 [ 2674.295576][T27536] ? lock_sync+0x190/0x190 [ 2674.300035][T27536] ? ext4_file_splice_read+0x150/0x150 [ 2674.305542][T27536] vfs_write+0x650/0xe40 [ 2674.309842][T27536] ? kernel_write+0x6c0/0x6c0 [ 2674.314661][T27536] ? __fget_files+0x279/0x410 [ 2674.319402][T27536] ksys_write+0x12f/0x250 [ 2674.323779][T27536] ? __ia32_sys_read+0xb0/0xb0 [ 2674.328592][T27536] ? syscall_enter_from_user_mode+0x26/0x80 [ 2674.334540][T27536] do_syscall_64+0x38/0xb0 [ 2674.338994][T27536] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2674.344924][T27536] RIP: 0033:0x7fc98cc7cae9 [ 2674.349363][T27536] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2674.369006][T27536] RSP: 002b:00007fc98da960c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2674.377469][T27536] RAX: ffffffffffffffda RBX: 00007fc98cd9bf80 RCX: 00007fc98cc7cae9 [ 2674.385479][T27536] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2674.393481][T27536] RBP: 00007fc98da96120 R08: 0000000000000000 R09: 0000000000000000 [ 2674.401477][T27536] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2674.409731][T27536] R13: 000000000000000b R14: 00007fc98cd9bf80 R15: 00007ffd4dc89db8 [ 2674.417747][T27536] [ 2674.443352][T27538] CPU: 1 PID: 27538 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2674.453859][T27538] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2674.463964][T27538] Call Trace: [ 2674.467265][T27538] [ 2674.470206][T27538] dump_stack_lvl+0x125/0x1b0 [ 2674.474908][T27538] should_fail_ex+0x496/0x5b0 [ 2674.479616][T27538] __should_fail_alloc_page+0xe7/0x130 [ 2674.485101][T27538] prepare_alloc_pages.constprop.0+0x16f/0x550 [ 2674.491330][T27538] __alloc_pages+0x14e/0x4a0 [ 2674.496003][T27538] ? __alloc_pages_slowpath.constprop.0+0x2360/0x2360 [ 2674.502866][T27538] ? __filemap_get_folio+0x288/0x990 [ 2674.508237][T27538] ? 
mark_held_locks+0x9f/0xe0 [ 2674.513091][T27538] __folio_alloc+0x16/0x40 [ 2674.517597][T27538] filemap_alloc_folio+0x154/0x490 [ 2674.522817][T27538] ? folio_wake_bit+0x270/0x270 [ 2674.527758][T27538] ? asm_exc_page_fault+0x26/0x30 [ 2674.532853][T27538] ? lockdep_hardirqs_on+0x7d/0x100 [ 2674.538126][T27538] __filemap_get_folio+0x288/0x990 [ 2674.543289][T27538] ext4_da_write_begin+0x3c7/0x8c0 [ 2674.548467][T27538] ? ext4_write_begin+0x1100/0x1100 [ 2674.553741][T27538] generic_perform_write+0x278/0x600 [ 2674.559103][T27538] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2674.564539][T27538] ? iunique+0x380/0x380 [ 2674.568836][T27538] ext4_buffered_write_iter+0x11f/0x3c0 [ 2674.574433][T27538] ext4_file_write_iter+0x7ee/0x1950 [ 2674.579778][T27538] ? lock_sync+0x190/0x190 [ 2674.584242][T27538] ? ext4_file_splice_read+0x150/0x150 [ 2674.589763][T27538] vfs_write+0x650/0xe40 [ 2674.594069][T27538] ? kernel_write+0x6c0/0x6c0 [ 2674.598812][T27538] ? __fget_files+0x279/0x410 [ 2674.603564][T27538] ksys_write+0x12f/0x250 [ 2674.607950][T27538] ? __ia32_sys_read+0xb0/0xb0 [ 2674.612768][T27538] ? syscall_enter_from_user_mode+0x26/0x80 [ 2674.618731][T27538] do_syscall_64+0x38/0xb0 [ 2674.623205][T27538] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2674.629157][T27538] RIP: 0033:0x7f8cd127cae9 [ 2674.633606][T27538] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2674.653262][T27538] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2674.661718][T27538] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2674.669750][T27538] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2674.677757][T27538] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2674.685760][T27538] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2674.693762][T27538] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2674.701788][T27538] [ 2674.710798][T27509] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:47:49 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xed8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:49 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 4) 19:47:49 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 9) 19:47:49 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x72150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2674.740071][T27509] workqueue: Failed to create a rescuer kthread for wq "bond1133": -EINTR [ 2675.008720][T27518] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2675.085628][T27532] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2675.101068][T27545] FAULT_INJECTION: forcing a failure. [ 2675.101068][T27545] name failslab, interval 1, probability 0, space 0, times 0 [ 2675.115358][T27545] CPU: 1 PID: 27545 Comm: syz-executor.0 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2675.125861][T27545] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2675.135977][T27545] Call Trace: [ 2675.139307][T27545] [ 2675.142295][T27545] dump_stack_lvl+0x125/0x1b0 [ 2675.147056][T27545] should_fail_ex+0x496/0x5b0 [ 2675.151815][T27545] ? __es_tree_search.isra.0+0x210/0x210 [ 2675.157532][T27545] should_failslab+0x9/0x20 [ 2675.162124][T27545] kmem_cache_alloc+0x69/0x3b0 [ 2675.166986][T27545] __es_insert_extent+0x741/0x1470 [ 2675.172194][T27545] ext4_es_insert_delayed_block+0x2ac/0x610 [ 2675.178174][T27545] ? ext4_is_pending+0x200/0x200 [ 2675.183183][T27545] ? percpu_counter_add_batch+0x132/0x1f0 [ 2675.188980][T27545] ? do_raw_spin_unlock+0x173/0x230 [ 2675.194260][T27545] ? _raw_spin_unlock+0x28/0x40 [ 2675.199189][T27545] ext4_da_get_block_prep+0x850/0x1340 [ 2675.204749][T27545] ? 
ext4_dax_writepages+0xb30/0xb30 [ 2675.210122][T27545] ? ext4_block_write_begin+0xc08/0xe30 [ 2675.215746][T27545] ? reacquire_held_locks+0x4b0/0x4b0 [ 2675.218337][T27532] workqueue: Failed to create a rescuer kthread for wq "bond1250": -EINTR [ 2675.221178][T27545] ? folio_flags.constprop.0+0x56/0x150 [ 2675.221252][T27545] ext4_block_write_begin+0x3da/0xe30 [ 2675.221314][T27545] ? ext4_dax_writepages+0xb30/0xb30 [ 2675.221369][T27545] ? mpage_map_and_submit_buffers+0xab0/0xab0 [ 2675.221428][T27545] ? __filemap_get_folio+0x1e7/0x990 [ 2675.221475][T27545] ext4_da_write_begin+0x40a/0x8c0 [ 2675.262830][T27545] ? ext4_write_begin+0x1100/0x1100 [ 2675.268143][T27545] generic_perform_write+0x278/0x600 [ 2675.273535][T27545] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2675.279003][T27545] ? iunique+0x380/0x380 [ 2675.283340][T27545] ext4_buffered_write_iter+0x11f/0x3c0 [ 2675.288964][T27545] ext4_file_write_iter+0x7ee/0x1950 [ 2675.294328][T27545] ? lock_sync+0x190/0x190 [ 2675.298821][T27545] ? ext4_file_splice_read+0x150/0x150 [ 2675.304367][T27545] vfs_write+0x650/0xe40 [ 2675.308697][T27545] ? kernel_write+0x6c0/0x6c0 [ 2675.313461][T27545] ? __fget_files+0x279/0x410 [ 2675.318234][T27545] ksys_write+0x12f/0x250 [ 2675.322651][T27545] ? __ia32_sys_read+0xb0/0xb0 [ 2675.327503][T27545] ? syscall_enter_from_user_mode+0x26/0x80 [ 2675.333509][T27545] do_syscall_64+0x38/0xb0 [ 2675.338002][T27545] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2675.343980][T27545] RIP: 0033:0x7fc98cc7cae9 [ 2675.348455][T27545] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2675.368138][T27545] RSP: 002b:00007fc98da960c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2675.376630][T27545] RAX: ffffffffffffffda RBX: 00007fc98cd9bf80 RCX: 00007fc98cc7cae9 19:47:49 executing program 3: r0 = socket$inet6(0xa, 0x2, 0x0) (async, rerun: 64) r1 = socket(0x10, 0x3, 0x0) (async, rerun: 64) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffff0500000000", @ANYRES32=r4, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r5, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) connect$inet6(r5, &(0x7f0000000040)={0xa, 0x0, 0x0, @dev, 0xf}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r5, 0x6, 0x1f, &(0x7f0000000000), 0x4) (async) setsockopt$inet6_tcp_TLS_TX(r5, 0x11a, 0x2, &(0x7f0000000180)=@gcm_128={{0x303, 0x38}, "00000100", "6a1d45a8ef8ee704328c671d000000f6", "94a92000", "920b00652000"}, 0x28) (async) sendto$inet6(r5, &(0x7f00000002c0)="31f52096bd9d5914f4b6b22787465ca98b5874f329ac77e9e1f11f05753e50", 0x1f, 0x20000004, &(0x7f0000000300)={0xa, 0x4e23, 0x4, @loopback, 0x2}, 0x1c) (async) sendmsg$nl_route_sched(r1, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000340)=@newqdisc={0x50, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r4, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_htb={{0x8}, {0x24, 0x2, [@TCA_HTB_INIT={0x18}, @TCA_HTB_DIRECT_QLEN={0x8}]}}]}, 0x50}}, 0x0) (async) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$TIPC_NL_LINK_RESET_STATS(r6, 
&(0x7f0000000240)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000200)={&(0x7f00000003c0)={0x90, 0x0, 0x400, 0x70bd29, 0x25dfdbfe, {}, [@TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x60e}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0xffffffff}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x5}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x6}]}, @TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x2}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x7d7}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x9}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x1f}]}, @TIPC_NLA_MON={0x34, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_REF={0x8, 0x2, 0xff}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x400}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x9}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x9}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x7fffffff}]}]}, 0x90}, 0x1, 0x0, 0x0, 0x28000}, 0x20000000) (async) sendmmsg$inet(r0, &(0x7f0000000280)=[{{&(0x7f00000001c0)={0x2, 0x4e21, @multicast1=0xe0000300}, 0x10, 0x0, 0x0, &(0x7f0000000000)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r4, @empty}}}], 0x20}}], 0x1, 0x0) 19:47:49 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 10) 19:47:49 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x72c81dcb, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2675.384671][T27545] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2675.392707][T27545] RBP: 00007fc98da96120 R08: 0000000000000000 R09: 0000000000000000 [ 2675.400739][T27545] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 2675.408769][T27545] R13: 000000000000000b R14: 00007fc98cd9bf80 R15: 00007ffd4dc89db8 [ 2675.416832][T27545] [ 2675.500959][T27556] FAULT_INJECTION: forcing a failure. [ 2675.500959][T27556] name failslab, interval 1, probability 0, space 0, times 0 [ 2675.542114][T27551] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2675.571535][T27556] CPU: 1 PID: 27556 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2675.582050][T27556] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2675.592172][T27556] Call Trace: [ 2675.595589][T27556] [ 2675.598574][T27556] dump_stack_lvl+0x125/0x1b0 [ 2675.603329][T27556] should_fail_ex+0x496/0x5b0 [ 2675.608102][T27556] should_failslab+0x9/0x20 [ 2675.612693][T27556] kmem_cache_alloc+0x33a/0x3b0 [ 2675.617624][T27556] ? reacquire_held_locks+0x4b0/0x4b0 [ 2675.623088][T27556] ? preempt_count_sub+0x150/0x150 [ 2675.628287][T27556] jbd2__journal_start+0x190/0x690 [ 2675.633509][T27556] __ext4_journal_start_sb+0x40f/0x5c0 [ 2675.639085][T27556] ? 
ext4_dirty_inode+0xa1/0x130 [ 2675.644133][T27556] ? ext4_setattr+0x29e0/0x29e0 [ 2675.649086][T27556] ext4_dirty_inode+0xa1/0x130 [ 2675.653950][T27556] ? rcu_is_watching+0x12/0xb0 [ 2675.658807][T27556] __mark_inode_dirty+0x1e0/0xd50 [ 2675.663918][T27556] ? folio_flags.constprop.0+0x56/0x150 [ 2675.669041][T27551] bond1133: entered promiscuous mode [ 2675.669535][T27556] generic_write_end+0x35f/0x440 [ 2675.669589][T27556] ext4_da_write_end+0x1fe/0xb40 [ 2675.669657][T27556] generic_perform_write+0x32e/0x600 [ 2675.676470][T27551] 8021q: adding VLAN 0 to HW filter on device bond1133 [ 2675.679874][T27556] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2675.679941][T27556] ? iunique+0x380/0x380 [ 2675.680000][T27556] ext4_buffered_write_iter+0x11f/0x3c0 [ 2675.680049][T27556] ext4_file_write_iter+0x7ee/0x1950 [ 2675.680111][T27556] ? lock_sync+0x190/0x190 [ 2675.680159][T27556] ? ext4_file_splice_read+0x150/0x150 [ 2675.680221][T27556] vfs_write+0x650/0xe40 [ 2675.680281][T27556] ? kernel_write+0x6c0/0x6c0 [ 2675.736837][T27556] ? __fget_files+0x279/0x410 [ 2675.741616][T27556] ksys_write+0x12f/0x250 [ 2675.746027][T27556] ? __ia32_sys_read+0xb0/0xb0 [ 2675.750888][T27556] ? syscall_enter_from_user_mode+0x26/0x80 [ 2675.756882][T27556] do_syscall_64+0x38/0xb0 [ 2675.761370][T27556] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2675.767329][T27556] RIP: 0033:0x7f8cd127cae9 [ 2675.771796][T27556] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 19:47:49 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x79e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2675.791461][T27556] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2675.799935][T27556] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2675.807964][T27556] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2675.815991][T27556] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2675.824019][T27556] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2675.832048][T27556] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2675.840116][T27556] 19:47:49 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 11) [ 2675.999900][T27553] bond1133: (slave bridge1062): making interface the new active one [ 2676.010878][T27573] FAULT_INJECTION: forcing a failure. 
[ 2676.010878][T27573] name failslab, interval 1, probability 0, space 0, times 0 [ 2676.019693][T27553] bridge1062: entered promiscuous mode [ 2676.031526][T27573] CPU: 0 PID: 27573 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2676.042040][T27573] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2676.052153][T27573] Call Trace: [ 2676.055479][T27573] [ 2676.057262][T27553] bond1133: (slave bridge1062): Enslaving as an active interface with an up link [ 2676.058435][T27573] dump_stack_lvl+0x125/0x1b0 [ 2676.072324][T27573] should_fail_ex+0x496/0x5b0 [ 2676.077088][T27573] should_failslab+0x9/0x20 [ 2676.081663][T27573] kmem_cache_alloc+0x33a/0x3b0 [ 2676.086587][T27573] ? reacquire_held_locks+0x4b0/0x4b0 [ 2676.092045][T27573] ? preempt_count_sub+0x150/0x150 [ 2676.097224][T27573] jbd2__journal_start+0x190/0x690 [ 2676.102395][T27573] __ext4_journal_start_sb+0x40f/0x5c0 [ 2676.107904][T27573] ? ext4_dirty_inode+0xa1/0x130 [ 2676.112890][T27573] ? ext4_setattr+0x29e0/0x29e0 [ 2676.117789][T27573] ext4_dirty_inode+0xa1/0x130 [ 2676.122602][T27573] ? rcu_is_watching+0x12/0xb0 [ 2676.127438][T27573] __mark_inode_dirty+0x1e0/0xd50 [ 2676.132506][T27573] ? folio_flags.constprop.0+0x56/0x150 [ 2676.138112][T27573] generic_write_end+0x35f/0x440 [ 2676.143088][T27573] ext4_da_write_end+0x1fe/0xb40 [ 2676.148085][T27573] generic_perform_write+0x32e/0x600 [ 2676.153433][T27573] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2676.158854][T27573] ? iunique+0x380/0x380 [ 2676.163147][T27573] ext4_buffered_write_iter+0x11f/0x3c0 [ 2676.169347][T27573] ext4_file_write_iter+0x7ee/0x1950 [ 2676.174693][T27573] ? lock_sync+0x190/0x190 [ 2676.179158][T27573] ? ext4_file_splice_read+0x150/0x150 [ 2676.184672][T27573] vfs_write+0x650/0xe40 [ 2676.188972][T27573] ? kernel_write+0x6c0/0x6c0 [ 2676.193714][T27573] ? __fget_files+0x279/0x410 [ 2676.198449][T27573] ksys_write+0x12f/0x250 [ 2676.202831][T27573] ? __ia32_sys_read+0xb0/0xb0 [ 2676.207674][T27573] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2676.213626][T27573] do_syscall_64+0x38/0xb0 [ 2676.218080][T27573] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2676.224011][T27573] RIP: 0033:0x7f8cd127cae9 [ 2676.228454][T27573] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2676.248193][T27573] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2676.256649][T27573] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2676.264655][T27573] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2676.272676][T27573] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2676.280677][T27573] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2676.288682][T27573] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2676.296727][T27573] 19:47:50 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:50 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xee8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2676.365291][T27561] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2676.487933][T27562] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:50 executing program 3: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$netlbl_cipso(&(0x7f0000000040), r0) sendmsg$NLBL_CIPSOV4_C_ADD(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000080)={&(0x7f00000003c0)=ANY=[@ANYBLOB='0\x00\x00\x00', @ANYRES16=r2, @ANYBLOB="0100000000000000000001000000080002000200000008000100050000000c1004800500030005000000"], 0x30}}, 0x0) 19:47:50 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 12) [ 2676.529068][T27562] workqueue: Failed to create a rescuer kthread for wq "bond1207": -EINTR [ 2676.714358][T27582] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2676.742243][T27584] FAULT_INJECTION: forcing a failure. 
[ 2676.742243][T27584] name fail_usercopy, interval 1, probability 0, space 0, times 0 [ 2676.758060][T27584] CPU: 0 PID: 27584 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2676.768564][T27584] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2676.778681][T27584] Call Trace: [ 2676.782007][T27584] [ 2676.785022][T27584] dump_stack_lvl+0x125/0x1b0 [ 2676.789791][T27584] should_fail_ex+0x496/0x5b0 [ 2676.794570][T27584] copyin+0x1b/0xa0 [ 2676.798475][T27584] copy_page_from_iter_atomic+0x257/0x13e0 [ 2676.804382][T27584] ? ext4_da_write_begin+0x228/0x8c0 19:47:50 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x2, 0x101bf) [ 2676.809768][T27584] ? csum_and_copy_to_iter+0x1430/0x1430 [ 2676.815488][T27584] ? ext4_write_begin+0x1100/0x1100 [ 2676.820778][T27584] ? ext4_da_write_end+0x1fe/0xb40 [ 2676.825995][T27584] generic_perform_write+0x2e8/0x600 [ 2676.831396][T27584] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2676.836874][T27584] ? iunique+0x380/0x380 [ 2676.841215][T27584] ext4_buffered_write_iter+0x11f/0x3c0 [ 2676.846847][T27584] ext4_file_write_iter+0x7ee/0x1950 [ 2676.852217][T27584] ? lock_acquire+0x1ef/0x510 [ 2676.856996][T27584] ? lock_sync+0x190/0x190 [ 2676.861516][T27584] ? ext4_file_splice_read+0x150/0x150 [ 2676.867084][T27584] vfs_write+0x650/0xe40 [ 2676.871433][T27584] ? kernel_write+0x6c0/0x6c0 [ 2676.876216][T27584] ? __fget_files+0x279/0x410 [ 2676.881010][T27584] ksys_write+0x12f/0x250 [ 2676.885461][T27584] ? __ia32_sys_read+0xb0/0xb0 [ 2676.890336][T27584] ? syscall_enter_from_user_mode+0x26/0x80 [ 2676.896338][T27584] do_syscall_64+0x38/0xb0 [ 2676.900838][T27584] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2676.906813][T27584] RIP: 0033:0x7f8cd127cae9 [ 2676.911288][T27584] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2676.930967][T27584] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2676.939447][T27584] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2676.947485][T27584] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2676.955524][T27584] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2676.963554][T27584] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2676.971585][T27584] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2676.979648][T27584] [ 2677.029233][T27571] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:51 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x73150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:51 executing program 3: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r2 = syz_genetlink_get_family_id$netlbl_cipso(&(0x7f0000000040), r0) sendmsg$NLBL_CIPSOV4_C_ADD(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000080)={&(0x7f00000003c0)=ANY=[@ANYBLOB='0\x00\x00\x00', @ANYRES16=r2, @ANYBLOB="0100000000000000000001000000080002000200000008000100050000000c1004800500030005000000"], 0x30}}, 0x0) 19:47:51 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 13) 19:47:51 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3, 0x101bf) [ 2677.131818][T27571] workqueue: Failed to create a rescuer kthread for wq "bond1250": -EINTR [ 2677.172856][T27592] FAULT_INJECTION: forcing a failure. [ 2677.172856][T27592] name failslab, interval 1, probability 0, space 0, times 0 [ 2677.285713][T27596] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2677.301263][T27592] CPU: 1 PID: 27592 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2677.311774][T27592] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2677.321886][T27592] Call Trace: [ 2677.325217][T27592] [ 2677.328197][T27592] dump_stack_lvl+0x125/0x1b0 [ 2677.332938][T27592] should_fail_ex+0x496/0x5b0 [ 2677.337684][T27592] should_failslab+0x9/0x20 [ 2677.342251][T27592] kmem_cache_alloc+0x33a/0x3b0 [ 2677.347158][T27592] ? reacquire_held_locks+0x4b0/0x4b0 [ 2677.352587][T27592] ? preempt_count_sub+0x150/0x150 [ 2677.357762][T27592] jbd2__journal_start+0x190/0x690 [ 2677.362957][T27592] __ext4_journal_start_sb+0x40f/0x5c0 [ 2677.368511][T27592] ? ext4_dirty_inode+0xa1/0x130 [ 2677.373531][T27592] ? ext4_setattr+0x29e0/0x29e0 [ 2677.378450][T27592] ext4_dirty_inode+0xa1/0x130 [ 2677.383337][T27592] ? rcu_is_watching+0x12/0xb0 [ 2677.388191][T27592] __mark_inode_dirty+0x1e0/0xd50 [ 2677.393282][T27592] ? folio_flags.constprop.0+0x56/0x150 [ 2677.398908][T27592] generic_write_end+0x35f/0x440 [ 2677.403899][T27592] ext4_da_write_end+0x1fe/0xb40 [ 2677.408907][T27592] generic_perform_write+0x32e/0x600 [ 2677.414269][T27592] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2677.419700][T27592] ? iunique+0x380/0x380 [ 2677.424005][T27592] ext4_buffered_write_iter+0x11f/0x3c0 [ 2677.429603][T27592] ext4_file_write_iter+0x7ee/0x1950 [ 2677.434944][T27592] ? lock_sync+0x190/0x190 [ 2677.439411][T27592] ? 
ext4_file_splice_read+0x150/0x150 [ 2677.444945][T27592] vfs_write+0x650/0xe40 [ 2677.449250][T27592] ? kernel_write+0x6c0/0x6c0 [ 2677.453985][T27592] ? __fget_files+0x279/0x410 [ 2677.458724][T27592] ksys_write+0x12f/0x250 [ 2677.463104][T27592] ? __ia32_sys_read+0xb0/0xb0 [ 2677.467955][T27592] ? syscall_enter_from_user_mode+0x26/0x80 [ 2677.473915][T27592] do_syscall_64+0x38/0xb0 [ 2677.478371][T27592] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2677.484310][T27592] RIP: 0033:0x7f8cd127cae9 [ 2677.488761][T27592] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2677.508409][T27592] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2677.516863][T27592] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2677.524874][T27592] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2677.532900][T27592] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2677.540900][T27592] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2677.548902][T27592] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2677.556941][T27592] 19:47:51 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x4, 0x101bf) [ 2677.651639][T27576] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:51 executing program 3: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r2 = syz_genetlink_get_family_id$netlbl_cipso(&(0x7f0000000040), r0) sendmsg$NLBL_CIPSOV4_C_ADD(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000080)={&(0x7f00000003c0)=ANY=[@ANYBLOB='0\x00\x00\x00', @ANYRES16=r2, @ANYBLOB="0100000000000000000001000000080002000200000008000100050000000c1004800500030005000000"], 0x30}}, 0x0) 19:47:51 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:51 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x5, 0x101bf) [ 2677.691833][T27576] workqueue: Failed to create a rescuer kthread for wq "bond1134": -EINTR [ 2677.861631][T27608] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2677.994564][T27595] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:47:52 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xef8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:52 executing program 3: ioctl$PPPIOCGDEBUG(0xffffffffffffffff, 0x80047441, &(0x7f0000000000)) r0 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NFT_BATCH(r0, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=ANY=[@ANYBLOB="140000001000010002000000000000000000009778a667c3130a20000000000a05000000000000000000010000000900010073797a300000000064000000090a010400000000000000070000000008000a40000000000900020073797a31000000000900010073797a300000000008000540000000212800118008000100636d70001c000280080003800400020008000140000000000800024000000000140000001000010000000000000000000000000a5bef9ae4c302e643509593de243b75c9f16fc538ec5bc0f1"], 0xac}}, 0x0) 19:47:52 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x6, 0x101bf) [ 2678.075466][T27595] bond1207: entered promiscuous mode [ 2678.081261][T27595] 8021q: adding VLAN 0 to HW filter on device bond1207 19:47:52 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 14) [ 2678.290597][T27600] bond1207: (slave bridge1109): making interface the new active one [ 2678.305318][T27600] bridge1109: entered promiscuous mode [ 2678.319553][T27624] FAULT_INJECTION: forcing a failure. [ 2678.319553][T27624] name fail_page_alloc, interval 1, probability 0, space 0, times 0 [ 2678.348556][T27600] bond1207: (slave bridge1109): Enslaving as an active interface with an up link [ 2678.383691][T27611] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:52 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x74000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:52 executing program 3: ioctl$PPPIOCGDEBUG(0xffffffffffffffff, 0x80047441, &(0x7f0000000000)) r0 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NFT_BATCH(r0, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=ANY=[@ANYBLOB="140000001000010002000000000000000000009778a667c3130a20000000000a05000000000000000000010000000900010073797a300000000064000000090a010400000000000000070000000008000a40000000000900020073797a31000000000900010073797a300000000008000540000000212800118008000100636d70001c000280080003800400020008000140000000000800024000000000140000001000010000000000000000000000000a5bef9ae4c302e643509593de243b75c9f16fc538ec5bc0f1"], 0xac}}, 0x0) (async) sendmsg$NFT_BATCH(r0, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=ANY=[@ANYBLOB="140000001000010002000000000000000000009778a667c3130a20000000000a05000000000000000000010000000900010073797a300000000064000000090a010400000000000000070000000008000a40000000000900020073797a31000000000900010073797a300000000008000540000000212800118008000100636d70001c000280080003800400020008000140000000000800024000000000140000001000010000000000000000000000000a5bef9ae4c302e643509593de243b75c9f16fc538ec5bc0f1"], 0xac}}, 0x0) 19:47:52 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x7, 0x101bf) [ 2678.434001][T27624] CPU: 1 PID: 27624 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2678.444513][T27624] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2678.454631][T27624] Call Trace: [ 2678.457955][T27624] [ 2678.460932][T27624] dump_stack_lvl+0x125/0x1b0 [ 2678.465688][T27624] should_fail_ex+0x496/0x5b0 [ 2678.470452][T27624] __should_fail_alloc_page+0xe7/0x130 [ 2678.475997][T27624] prepare_alloc_pages.constprop.0+0x16f/0x550 [ 2678.482254][T27624] __alloc_pages+0x14e/0x4a0 [ 2678.486932][T27624] ? __alloc_pages_slowpath.constprop.0+0x2360/0x2360 [ 2678.493767][T27624] ? __filemap_get_folio+0x288/0x990 [ 2678.499106][T27624] ? mark_held_locks+0x9f/0xe0 [ 2678.503938][T27624] __folio_alloc+0x16/0x40 [ 2678.508425][T27624] filemap_alloc_folio+0x154/0x490 [ 2678.513615][T27624] ? folio_wake_bit+0x270/0x270 [ 2678.518531][T27624] ? asm_exc_page_fault+0x26/0x30 [ 2678.523602][T27624] ? lockdep_hardirqs_on+0x7d/0x100 [ 2678.528868][T27624] __filemap_get_folio+0x288/0x990 [ 2678.534029][T27624] ext4_da_write_begin+0x3c7/0x8c0 [ 2678.539211][T27624] ? ext4_write_begin+0x1100/0x1100 [ 2678.544468][T27624] ? ext4_da_write_end+0x1fe/0xb40 [ 2678.549652][T27624] generic_perform_write+0x278/0x600 [ 2678.555011][T27624] ? 
folio_add_wait_queue+0x1c0/0x1c0 [ 2678.560440][T27624] ? iunique+0x380/0x380 [ 2678.564742][T27624] ext4_buffered_write_iter+0x11f/0x3c0 [ 2678.570358][T27624] ext4_file_write_iter+0x7ee/0x1950 [ 2678.575700][T27624] ? lock_sync+0x190/0x190 [ 2678.580166][T27624] ? ext4_file_splice_read+0x150/0x150 [ 2678.585679][T27624] vfs_write+0x650/0xe40 [ 2678.589989][T27624] ? kernel_write+0x6c0/0x6c0 [ 2678.594734][T27624] ? __fget_files+0x279/0x410 [ 2678.599486][T27624] ksys_write+0x12f/0x250 [ 2678.603879][T27624] ? __ia32_sys_read+0xb0/0xb0 [ 2678.608702][T27624] ? syscall_enter_from_user_mode+0x26/0x80 [ 2678.614664][T27624] do_syscall_64+0x38/0xb0 [ 2678.619130][T27624] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2678.625100][T27624] RIP: 0033:0x7f8cd127cae9 [ 2678.629566][T27624] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2678.649230][T27624] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2678.657687][T27624] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2678.665692][T27624] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2678.673719][T27624] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2678.681724][T27624] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2678.689767][T27624] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2678.697793][T27624] [ 2678.772293][T27611] bond1250: entered promiscuous mode 19:47:52 executing program 3: ioctl$PPPIOCGDEBUG(0xffffffffffffffff, 0x80047441, &(0x7f0000000000)) (async) r0 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NFT_BATCH(r0, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=ANY=[@ANYBLOB="140000001000010002000000000000000000009778a667c3130a20000000000a05000000000000000000010000000900010073797a300000000064000000090a010400000000000000070000000008000a40000000000900020073797a31000000000900010073797a300000000008000540000000212800118008000100636d70001c000280080003800400020008000140000000000800024000000000140000001000010000000000000000000000000a5bef9ae4c302e643509593de243b75c9f16fc538ec5bc0f1"], 0xac}}, 0x0) 19:47:52 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 15) 19:47:52 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8, 0x101bf) [ 2678.796644][T27611] 8021q: adding VLAN 0 to HW filter on device bond1250 [ 2678.918845][T27641] FAULT_INJECTION: forcing a failure. [ 2678.918845][T27641] name fail_page_alloc, interval 1, probability 0, space 0, times 0 [ 2678.939819][T27641] CPU: 0 PID: 27641 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2678.950339][T27641] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2678.960461][T27641] Call Trace: [ 2678.963793][T27641] [ 2678.966779][T27641] dump_stack_lvl+0x125/0x1b0 [ 2678.971553][T27641] should_fail_ex+0x496/0x5b0 [ 2678.976326][T27641] __should_fail_alloc_page+0xe7/0x130 [ 2678.981886][T27641] prepare_alloc_pages.constprop.0+0x16f/0x550 [ 2678.988142][T27641] __alloc_pages+0x14e/0x4a0 [ 2678.992841][T27641] ? __alloc_pages_slowpath.constprop.0+0x2360/0x2360 [ 2678.999712][T27641] ? 
__filemap_get_folio+0x288/0x990 [ 2679.005081][T27641] ? mark_held_locks+0x9f/0xe0 [ 2679.009926][T27641] __folio_alloc+0x16/0x40 [ 2679.014420][T27641] filemap_alloc_folio+0x154/0x490 [ 2679.019623][T27641] ? folio_wake_bit+0x270/0x270 [ 2679.024561][T27641] ? asm_exc_page_fault+0x26/0x30 [ 2679.029544][T27614] bond1250: (slave bridge1179): making interface the new active one [ 2679.029633][T27641] ? lockdep_hardirqs_on+0x7d/0x100 [ 2679.042913][T27641] __filemap_get_folio+0x288/0x990 [ 2679.048095][T27641] ext4_da_write_begin+0x3c7/0x8c0 [ 2679.053270][T27641] ? ext4_write_begin+0x1100/0x1100 [ 2679.058511][T27641] ? ext4_da_write_end+0x1fe/0xb40 [ 2679.063681][T27641] generic_perform_write+0x278/0x600 [ 2679.069028][T27641] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2679.074448][T27641] ? iunique+0x380/0x380 [ 2679.078738][T27641] ext4_buffered_write_iter+0x11f/0x3c0 [ 2679.084324][T27641] ext4_file_write_iter+0x7ee/0x1950 [ 2679.089656][T27641] ? lock_sync+0x190/0x190 [ 2679.094122][T27641] ? ext4_file_splice_read+0x150/0x150 [ 2679.099642][T27641] vfs_write+0x650/0xe40 [ 2679.103939][T27641] ? kernel_write+0x6c0/0x6c0 [ 2679.108672][T27641] ? __fget_files+0x279/0x410 [ 2679.113412][T27641] ksys_write+0x12f/0x250 [ 2679.117790][T27641] ? __ia32_sys_read+0xb0/0xb0 [ 2679.122621][T27641] ? syscall_enter_from_user_mode+0x26/0x80 [ 2679.128585][T27641] do_syscall_64+0x38/0xb0 [ 2679.133055][T27641] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2679.139018][T27641] RIP: 0033:0x7f8cd127cae9 [ 2679.143475][T27641] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2679.163122][T27641] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2679.171577][T27641] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2679.179596][T27641] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2679.187602][T27641] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2679.195602][T27641] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2679.203600][T27641] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2679.211635][T27641] [ 2679.226927][T27614] bridge1179: entered promiscuous mode [ 2679.256508][T27614] bond1250: (slave bridge1179): Enslaving as an active interface with an up link 19:47:53 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a030000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:53 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$nl80211(&(0x7f00000001c0), r1) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = 
syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000080)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_SET_MPATH(r0, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000140)={0x64, r3, 0x100, 0x70bd29, 0x25dfdbfb, {{}, {@val={0x8, 0x3, r5}, @void}}, [@NL80211_ATTR_MAC={0xa, 0x6, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}]}, 0x64}, 0x1, 0x0, 0x0, 0x2400c001}, 0xc000) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000300)=0x14) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x40, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x6}, [@IFLA_LINKINFO={0x18, 0x12, 0x0, 0x1, @wireguard={{0xe}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x40}}, 0x0) socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) [ 2679.327123][T27622] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2679.352585][T27622] workqueue: Failed to create a rescuer kthread for wq "bond1134": -EINTR [ 2679.558470][T27635] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:47:53 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf08c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:53 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9, 0x101bf) 19:47:53 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 16) [ 2679.651546][T27635] bond1208: entered promiscuous mode 19:47:53 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa, 0x101bf) [ 2679.687400][T27635] 8021q: adding VLAN 0 to HW filter on device bond1208 [ 2679.808678][T27663] FAULT_INJECTION: forcing a failure. 
[ 2679.808678][T27663] name fail_usercopy, interval 1, probability 0, space 0, times 0 [ 2679.822163][T27663] CPU: 1 PID: 27663 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2679.832646][T27663] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2679.842763][T27663] Call Trace: [ 2679.846099][T27663] [ 2679.849089][T27663] dump_stack_lvl+0x125/0x1b0 [ 2679.853852][T27663] should_fail_ex+0x496/0x5b0 [ 2679.858626][T27663] copyin+0x1b/0xa0 [ 2679.862534][T27663] copy_page_from_iter_atomic+0x257/0x13e0 [ 2679.868428][T27663] ? ext4_da_write_begin+0x228/0x8c0 [ 2679.873816][T27663] ? csum_and_copy_to_iter+0x1430/0x1430 [ 2679.879529][T27663] ? ext4_write_begin+0x1100/0x1100 [ 2679.884823][T27663] ? ext4_da_write_end+0x1fe/0xb40 [ 2679.890049][T27663] generic_perform_write+0x2e8/0x600 [ 2679.895445][T27663] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2679.900921][T27663] ? iunique+0x380/0x380 [ 2679.905269][T27663] ext4_buffered_write_iter+0x11f/0x3c0 [ 2679.910897][T27663] ext4_file_write_iter+0x7ee/0x1950 [ 2679.916278][T27663] ? lock_sync+0x190/0x190 [ 2679.920788][T27663] ? ext4_file_splice_read+0x150/0x150 [ 2679.926345][T27663] vfs_write+0x650/0xe40 [ 2679.930683][T27663] ? kernel_write+0x6c0/0x6c0 [ 2679.935461][T27663] ? __fget_files+0x279/0x410 [ 2679.940245][T27663] ksys_write+0x12f/0x250 [ 2679.944661][T27663] ? __ia32_sys_read+0xb0/0xb0 [ 2679.949562][T27663] ? syscall_enter_from_user_mode+0x26/0x80 [ 2679.955546][T27663] do_syscall_64+0x38/0xb0 [ 2679.960032][T27663] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2679.965997][T27663] RIP: 0033:0x7f8cd127cae9 [ 2679.970469][T27663] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2679.990142][T27663] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2679.998624][T27663] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2680.006660][T27663] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2680.014690][T27663] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2680.022724][T27663] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2680.030763][T27663] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2680.038850][T27663] [ 2680.118415][T27638] bond1208: (slave bridge1110): making interface the new active one [ 2680.132693][T27638] bridge1110: entered promiscuous mode [ 2680.146994][T27638] bond1208: (slave bridge1110): Enslaving as an active interface with an up link [ 2680.161537][T27652] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:54 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x74150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:54 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 17) 19:47:54 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb, 0x101bf) [ 2680.287044][T27670] FAULT_INJECTION: forcing a failure. [ 2680.287044][T27670] name failslab, interval 1, probability 0, space 0, times 0 [ 2680.303553][T27670] CPU: 0 PID: 27670 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2680.314062][T27670] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2680.324179][T27670] Call Trace: [ 2680.327512][T27670] [ 2680.330492][T27670] dump_stack_lvl+0x125/0x1b0 [ 2680.335253][T27670] should_fail_ex+0x496/0x5b0 [ 2680.340025][T27670] should_failslab+0x9/0x20 [ 2680.344608][T27670] kmem_cache_alloc+0x33a/0x3b0 [ 2680.349568][T27670] ? reacquire_held_locks+0x4b0/0x4b0 [ 2680.355026][T27670] ? preempt_count_sub+0x150/0x150 [ 2680.360236][T27670] jbd2__journal_start+0x190/0x690 [ 2680.365456][T27670] __ext4_journal_start_sb+0x40f/0x5c0 [ 2680.371017][T27670] ? ext4_dirty_inode+0xa1/0x130 [ 2680.376049][T27670] ? ext4_setattr+0x29e0/0x29e0 [ 2680.380984][T27670] ext4_dirty_inode+0xa1/0x130 [ 2680.381587][T27652] bond1251: entered promiscuous mode [ 2680.385813][T27670] ? rcu_is_watching+0x12/0xb0 [ 2680.385875][T27670] __mark_inode_dirty+0x1e0/0xd50 [ 2680.385925][T27670] ? folio_flags.constprop.0+0x56/0x150 [ 2680.403931][T27652] 8021q: adding VLAN 0 to HW filter on device bond1251 [ 2680.406583][T27670] generic_write_end+0x35f/0x440 [ 2680.406634][T27670] ext4_da_write_end+0x1fe/0xb40 [ 2680.423474][T27670] generic_perform_write+0x32e/0x600 [ 2680.428831][T27670] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2680.434259][T27670] ? iunique+0x380/0x380 [ 2680.438551][T27670] ext4_buffered_write_iter+0x11f/0x3c0 [ 2680.444204][T27670] ext4_file_write_iter+0x7ee/0x1950 [ 2680.449537][T27670] ? lock_sync+0x190/0x190 [ 2680.453999][T27670] ? ext4_file_splice_read+0x150/0x150 [ 2680.463341][T27670] vfs_write+0x650/0xe40 [ 2680.467658][T27670] ? kernel_write+0x6c0/0x6c0 [ 2680.472397][T27670] ? __fget_files+0x279/0x410 [ 2680.477139][T27670] ksys_write+0x12f/0x250 [ 2680.481521][T27670] ? __ia32_sys_read+0xb0/0xb0 [ 2680.486335][T27670] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2680.492283][T27670] do_syscall_64+0x38/0xb0 [ 2680.496750][T27670] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2680.502681][T27670] RIP: 0033:0x7f8cd127cae9 [ 2680.507124][T27670] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2680.526788][T27670] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 19:47:54 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc, 0x101bf) [ 2680.535239][T27670] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2680.543235][T27670] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2680.551229][T27670] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2680.559226][T27670] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2680.567225][T27670] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2680.575250][T27670] 19:47:54 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd, 0x101bf) 19:47:54 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 18) [ 2680.611554][T27653] workqueue: Failed to create a rescuer kthread for wq "wg-crypt-wireguard%d": -EINTR [ 2680.893026][T27682] FAULT_INJECTION: forcing a failure. [ 2680.893026][T27682] name fail_usercopy, interval 1, probability 0, space 0, times 0 [ 2680.917479][T27682] CPU: 1 PID: 27682 Comm: syz-executor.2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0 [ 2680.927961][T27682] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023 [ 2680.938058][T27682] Call Trace: [ 2680.941364][T27682] [ 2680.944325][T27682] dump_stack_lvl+0x125/0x1b0 [ 2680.949066][T27682] should_fail_ex+0x496/0x5b0 [ 2680.953802][T27682] copyin+0x1b/0xa0 [ 2680.957669][T27682] copy_page_from_iter_atomic+0x257/0x13e0 [ 2680.963526][T27682] ? ext4_da_write_begin+0x228/0x8c0 [ 2680.968887][T27682] ? csum_and_copy_to_iter+0x1430/0x1430 [ 2680.974568][T27682] ? ext4_write_begin+0x1100/0x1100 [ 2680.979839][T27682] generic_perform_write+0x2e8/0x600 [ 2680.985200][T27682] ? folio_add_wait_queue+0x1c0/0x1c0 [ 2680.990636][T27682] ? iunique+0x380/0x380 [ 2680.994943][T27682] ext4_buffered_write_iter+0x11f/0x3c0 [ 2681.000538][T27682] ext4_file_write_iter+0x7ee/0x1950 [ 2681.005888][T27682] ? lock_sync+0x190/0x190 [ 2681.010358][T27682] ? ext4_file_splice_read+0x150/0x150 [ 2681.015880][T27682] vfs_write+0x650/0xe40 [ 2681.020185][T27682] ? kernel_write+0x6c0/0x6c0 [ 2681.024931][T27682] ? __fget_files+0x279/0x410 [ 2681.029689][T27682] ksys_write+0x12f/0x250 [ 2681.034076][T27682] ? __ia32_sys_read+0xb0/0xb0 [ 2681.038905][T27682] ? 
syscall_enter_from_user_mode+0x26/0x80 [ 2681.044867][T27682] do_syscall_64+0x38/0xb0 [ 2681.049336][T27682] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 2681.055273][T27682] RIP: 0033:0x7f8cd127cae9 [ 2681.059718][T27682] Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 [ 2681.079371][T27682] RSP: 002b:00007f8cd1fb70c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 2681.087830][T27682] RAX: ffffffffffffffda RBX: 00007f8cd139bf80 RCX: 00007f8cd127cae9 [ 2681.095837][T27682] RDX: 00000000000101bf RSI: 0000000020000380 RDI: 0000000000000003 [ 2681.103839][T27682] RBP: 00007f8cd1fb7120 R08: 0000000000000000 R09: 0000000000000000 [ 2681.111843][T27682] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002 [ 2681.119847][T27682] R13: 000000000000000b R14: 00007f8cd139bf80 R15: 00007ffc64ae9898 [ 2681.127871][T27682] [ 2681.229868][T27654] bond1251: (slave bridge1180): making interface the new active one [ 2681.273849][T27654] bridge1180: entered promiscuous mode [ 2681.294654][T27654] bond1251: (slave bridge1180): Enslaving as an active interface with an up link [ 2681.323610][T27659] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:47:55 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7ae70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:55 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe, 0x101bf) [ 2681.346915][T27659] workqueue: Failed to create a rescuer kthread for wq "bond1134": -EINTR [ 2681.473123][T27673] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:47:55 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf0ffffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:55 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$nl80211(&(0x7f00000001c0), r1) (async) syz_genetlink_get_family_id$nl80211(&(0x7f00000001c0), r1) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000080)={'wlan1\x00'}) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000080)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_SET_MPATH(r0, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000140)={0x64, r3, 0x100, 0x70bd29, 0x25dfdbfb, {{}, {@val={0x8, 0x3, r5}, @void}}, [@NL80211_ATTR_MAC={0xa, 0x6, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}]}, 0x64}, 0x1, 0x0, 0x0, 0x2400c001}, 0xc000) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000300)=0x14) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x40, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x6}, [@IFLA_LINKINFO={0x18, 0x12, 0x0, 0x1, @wireguard={{0xe}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x40}}, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x40, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x6}, [@IFLA_LINKINFO={0x18, 0x12, 0x0, 0x1, @wireguard={{0xe}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x40}}, 0x0) socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) 19:47:55 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (fail_nth: 19) 19:47:55 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf, 0x101bf) 19:47:55 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 
0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x75150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:55 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x10, 0x101bf) [ 2681.518804][T27673] workqueue: Failed to create a rescuer kthread for wq "bond1209": -EINTR [ 2681.771612][T27686] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:47:55 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:47:56 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x11, 0x101bf) [ 2681.949140][T27686] bond1252: entered promiscuous mode [ 2681.957292][T27686] 8021q: adding VLAN 0 to HW filter on device bond1252 19:47:56 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x2, 0x101bf) 19:47:56 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x91, 0x101bf) [ 2682.234435][T27688] bond1252: (slave bridge1181): making interface the new active one [ 2682.251181][T27688] bridge1181: entered promiscuous mode [ 2682.320180][T27688] bond1252: (slave bridge1181): Enslaving as an active interface with an up link [ 2682.352816][T27697] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:47:56 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7be70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:56 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3, 0x101bf) [ 2682.479360][T27697] bond1134: entered promiscuous mode [ 2682.485367][T27697] 8021q: adding VLAN 0 to HW filter on device bond1134 19:47:56 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf18c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:56 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1bf, 0x101bf) 19:47:56 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$nl80211(&(0x7f00000001c0), r1) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000080)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_SET_MPATH(r0, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000140)={0x64, r3, 0x100, 0x70bd29, 0x25dfdbfb, {{}, {@val={0x8, 0x3, r5}, @void}}, [@NL80211_ATTR_MAC={0xa, 0x6, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}]}, 0x64}, 0x1, 0x0, 0x0, 0x2400c001}, 0xc000) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000300)=0x14) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x40, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x6}, [@IFLA_LINKINFO={0x18, 0x12, 0x0, 0x1, @wireguard={{0xe}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x40}}, 0x0) socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) syz_genetlink_get_family_id$nl80211(&(0x7f00000001c0), r1) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000080)={'wlan1\x00'}) (async) sendmsg$NL80211_CMD_SET_MPATH(r0, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000140)={0x64, r3, 0x100, 0x70bd29, 0x25dfdbfb, {{}, {@val={0x8, 0x3, r5}, @void}}, [@NL80211_ATTR_MAC={0xa, 0x6, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}]}, 0x64}, 0x1, 0x0, 0x0, 0x2400c001}, 0xc000) (async) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000300)=0x14) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x40, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x6}, [@IFLA_LINKINFO={0x18, 0x12, 0x0, 0x1, @wireguard={{0xe}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x40}}, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) [ 2682.543512][T27704] bond1134: (slave bridge1063): making interface the new active one [ 2682.551605][T27704] bridge1063: entered promiscuous mode [ 2682.562011][T27704] bond1134: (slave bridge1063): Enslaving as an active interface with an up link 19:47:56 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x4, 0x101bf) [ 2682.651660][T27709] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2682.758969][T27709] bond1209: entered promiscuous mode [ 2682.769623][T27709] 8021q: adding VLAN 0 to HW filter on device bond1209 [ 2682.833755][T27710] bond1209: (slave bridge1111): making interface the new active one [ 2682.841982][T27710] bridge1111: entered promiscuous mode [ 2682.859053][T27710] bond1209: (slave bridge1111): Enslaving as an active interface with an up link 19:47:56 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x76150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:56 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x300, 0x101bf) 19:47:57 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x5, 0x101bf) [ 2682.930246][T27723] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2683.014565][T27723] bond1253: entered promiscuous mode [ 2683.020538][T27723] 8021q: adding VLAN 0 to HW filter on device bond1253 19:47:57 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x500, 0x101bf) [ 2683.096419][T27726] bond1253: (slave bridge1182): making interface the new active one [ 2683.105209][T27726] bridge1182: entered promiscuous mode [ 2683.119402][T27726] bond1253: (slave bridge1182): Enslaving as an active interface with an up link 19:47:57 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x6, 0x101bf) 19:47:57 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7ce70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:57 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x7, 0x101bf) 19:47:57 executing program 0: r0 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x600, 0x101bf) [ 2683.277861][T27734] workqueue: Failed to create a rescuer kthread for wq "wg-crypt-wireguard%d": -EINTR [ 2683.534722][T27737] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2683.684337][T27737] bond1135: entered promiscuous mode [ 2683.690159][T27737] 8021q: adding VLAN 0 to HW filter on device bond1135 19:47:57 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf28c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:57 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x700, 0x101bf) 19:47:57 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3, 0x101bf) 19:47:57 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8, 0x101bf) [ 2683.767183][T27740] bond1135: (slave bridge1064): making interface the new active one [ 2683.776893][T27740] bridge1064: entered promiscuous mode [ 2683.787927][T27740] bond1135: (slave bridge1064): Enslaving as an active interface with an up link [ 2683.802568][T27746] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2683.907294][T27746] bond1210: entered promiscuous mode [ 2683.914522][T27746] 8021q: adding VLAN 0 to HW filter on device bond1210 [ 2684.013813][T27747] bond1210: (slave bridge1112): making interface the new active one [ 2684.022081][T27747] bridge1112: entered promiscuous mode [ 2684.035477][T27747] bond1210: (slave bridge1112): Enslaving as an active interface with an up link [ 2684.050957][T27755] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:47:58 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x77150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9, 0x101bf) 19:47:58 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x900, 0x101bf) 19:47:58 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9, 0x101bf) 19:47:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa, 0x101bf) 19:47:58 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7de70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:58 executing program 3: r0 = socket$inet6_sctp(0xa, 0x1, 0x84) setsockopt$inet_sctp6_SCTP_FRAGMENT_INTERLEAVE(r0, 0x84, 0x12, &(0x7f0000000040)=0x9, 0x4) bind$inet6(r0, &(0x7f0000000000)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, &(0x7f00000000c0)="d375ac16095665e742ae9887f1c5d4a2462392566b37cc43e6b71bc4e0e9714efe2d4e23df120824aef8b4d595fdc3b9480adff73bb1ae359194878c54", 0x3d, 0x14040000, &(0x7f0000000240)={0xa, 0x4e23, 0x5, @empty}, 0x1c) sendto$inet6(r0, &(0x7f0000000080)="03", 0x1a000, 0x0, &(0x7f0000000100)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r2 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r2, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) connect$inet6(r2, &(0x7f0000000040)={0xa, 0xfffe, 0x0, @dev={0xfe, 0x80, '\x00', 0x22}, 0x2}, 0x1c) setsockopt$inet6_tcp_TCP_ULP(r2, 0x6, 0x1f, &(0x7f0000000000), 0x4) setsockopt$inet6_tcp_TLS_TX(r2, 0x11a, 0x2, &(0x7f0000000180)=@gcm_128={{0x303, 0x38}, "00000100", "6a1d45a8ef8ee704328c671d000000f6", "94a92000", "920b00652000"}, 0x28) sendto$inet6(r2, 
&(0x7f0000000140)="3aef3afec6837190c565686f3f9b517a43e54c1af9630c0fec21aecbe9a1d90f2fd66f48d639c23baef437f41349e24f191457db644ac1f221346c86087af833d1dfb3a53eb10bbba140833b046c795f71ee3d214200a5ea725f81b273eee9637104bd6b14c053ea8434eeba616521b65e57e5275942e737d00fb53553ef0494f7c64e2cf4b5d562f0287269d68e85e82393915cc0536d05e00dc2e1499fd46f169ed2e3a3029296de529343338fcd7bdab01d0a1e81f4ce811754fc9d462b73ada5564314ecf58fb2e8c2bbf3c6f2ecd24e05ac17254d3069b669b35792cfbaafa5c6a0a5a7f54564eeca9d", 0xec, 0x20000844, 0x0, 0x0) 19:47:58 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa00, 0x101bf) [ 2684.155487][T27755] workqueue: Failed to create a rescuer kthread for wq "bond1254": -EINTR [ 2684.482324][T27773] bond1136: entered promiscuous mode [ 2684.577293][T27773] 8021q: adding VLAN 0 to HW filter on device bond1136 [ 2684.710188][T27775] bond1136: (slave bridge1065): making interface the new active one [ 2684.719318][T27775] bridge1065: entered promiscuous mode [ 2684.735898][T27775] bond1136: (slave bridge1065): Enslaving as an active interface with an up link 19:47:58 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf38c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb, 0x101bf) 19:47:58 executing program 3: r0 = socket$inet6_sctp(0xa, 0x1, 0x84) setsockopt$inet_sctp6_SCTP_FRAGMENT_INTERLEAVE(r0, 0x84, 0x12, &(0x7f0000000040)=0x9, 0x4) bind$inet6(r0, &(0x7f0000000000)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, &(0x7f00000000c0)="d375ac16095665e742ae9887f1c5d4a2462392566b37cc43e6b71bc4e0e9714efe2d4e23df120824aef8b4d595fdc3b9480adff73bb1ae359194878c54", 0x3d, 0x14040000, &(0x7f0000000240)={0xa, 0x4e23, 0x5, @empty}, 0x1c) (async) sendto$inet6(r0, &(0x7f0000000080)="03", 0x1a000, 0x0, &(0x7f0000000100)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r2 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r2, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) (async) connect$inet6(r2, &(0x7f0000000040)={0xa, 0xfffe, 0x0, @dev={0xfe, 0x80, '\x00', 0x22}, 0x2}, 0x1c) (async) setsockopt$inet6_tcp_TCP_ULP(r2, 0x6, 0x1f, &(0x7f0000000000), 0x4) (async) setsockopt$inet6_tcp_TLS_TX(r2, 0x11a, 0x2, &(0x7f0000000180)=@gcm_128={{0x303, 0x38}, "00000100", "6a1d45a8ef8ee704328c671d000000f6", "94a92000", "920b00652000"}, 0x28) sendto$inet6(r2, 
&(0x7f0000000140)="3aef3afec6837190c565686f3f9b517a43e54c1af9630c0fec21aecbe9a1d90f2fd66f48d639c23baef437f41349e24f191457db644ac1f221346c86087af833d1dfb3a53eb10bbba140833b046c795f71ee3d214200a5ea725f81b273eee9637104bd6b14c053ea8434eeba616521b65e57e5275942e737d00fb53553ef0494f7c64e2cf4b5d562f0287269d68e85e82393915cc0536d05e00dc2e1499fd46f169ed2e3a3029296de529343338fcd7bdab01d0a1e81f4ce811754fc9d462b73ada5564314ecf58fb2e8c2bbf3c6f2ecd24e05ac17254d3069b669b35792cfbaafa5c6a0a5a7f54564eeca9d", 0xec, 0x20000844, 0x0, 0x0) 19:47:58 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb00, 0x101bf) [ 2684.837167][T27785] bond1211: entered promiscuous mode [ 2684.843112][T27785] 8021q: adding VLAN 0 to HW filter on device bond1211 [ 2684.935972][T27787] bond1211: (slave bridge1113): making interface the new active one [ 2684.946447][T27787] bridge1113: entered promiscuous mode [ 2684.980863][T27787] bond1211: (slave bridge1113): Enslaving as an active interface with an up link 19:47:59 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x78150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:59 executing program 3: r0 = socket$inet6_sctp(0xa, 0x1, 0x84) setsockopt$inet_sctp6_SCTP_FRAGMENT_INTERLEAVE(r0, 0x84, 0x12, &(0x7f0000000040)=0x9, 0x4) (async) bind$inet6(r0, &(0x7f0000000000)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, &(0x7f00000000c0)="d375ac16095665e742ae9887f1c5d4a2462392566b37cc43e6b71bc4e0e9714efe2d4e23df120824aef8b4d595fdc3b9480adff73bb1ae359194878c54", 0x3d, 0x14040000, &(0x7f0000000240)={0xa, 0x4e23, 0x5, @empty}, 0x1c) (async) sendto$inet6(r0, &(0x7f0000000080)="03", 0x1a000, 0x0, &(0x7f0000000100)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) (async) r2 = socket$inet6_tcp(0xa, 0x1, 0x0) setsockopt$inet6_tcp_int(r2, 0x6, 0x13, &(0x7f0000000100)=0x100000001, 0x4) (async) connect$inet6(r2, &(0x7f0000000040)={0xa, 0xfffe, 0x0, @dev={0xfe, 0x80, '\x00', 0x22}, 0x2}, 0x1c) (async) setsockopt$inet6_tcp_TCP_ULP(r2, 0x6, 0x1f, &(0x7f0000000000), 0x4) setsockopt$inet6_tcp_TLS_TX(r2, 0x11a, 0x2, &(0x7f0000000180)=@gcm_128={{0x303, 0x38}, "00000100", "6a1d45a8ef8ee704328c671d000000f6", "94a92000", "920b00652000"}, 0x28) sendto$inet6(r2, 
&(0x7f0000000140)="3aef3afec6837190c565686f3f9b517a43e54c1af9630c0fec21aecbe9a1d90f2fd66f48d639c23baef437f41349e24f191457db644ac1f221346c86087af833d1dfb3a53eb10bbba140833b046c795f71ee3d214200a5ea725f81b273eee9637104bd6b14c053ea8434eeba616521b65e57e5275942e737d00fb53553ef0494f7c64e2cf4b5d562f0287269d68e85e82393915cc0536d05e00dc2e1499fd46f169ed2e3a3029296de529343338fcd7bdab01d0a1e81f4ce811754fc9d462b73ada5564314ecf58fb2e8c2bbf3c6f2ecd24e05ac17254d3069b669b35792cfbaafa5c6a0a5a7f54564eeca9d", 0xec, 0x20000844, 0x0, 0x0) 19:47:59 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc00, 0x101bf) 19:47:59 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc, 0x101bf) 19:47:59 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd, 0x101bf) [ 2685.272531][T27796] bond1254: entered promiscuous mode [ 2685.311145][T27796] 8021q: adding VLAN 0 to HW filter on device bond1254 [ 2685.480104][T27798] bond1254: (slave bridge1183): making interface the new active one [ 2685.490382][T27798] bridge1183: entered promiscuous mode [ 2685.504833][T27798] bond1254: (slave bridge1183): Enslaving as an active interface with an up link 19:47:59 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd00, 0x101bf) 19:47:59 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb00, 0x101bf) 19:47:59 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7ee70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2685.571370][T27807] validate_nla: 3 callbacks suppressed [ 2685.571393][T27807] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2685.756791][T27807] bond1137: entered promiscuous mode [ 2685.765414][T27807] 8021q: adding VLAN 0 to HW filter on device bond1137 [ 2685.852598][T27809] bond1137: (slave bridge1066): making interface the new active one [ 2685.864727][T27809] bridge1066: entered promiscuous mode [ 2685.880185][T27809] bond1137: (slave bridge1066): Enslaving as an active interface with an up link 19:47:59 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe, 0x101bf) 19:47:59 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe00, 0x101bf) 19:47:59 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf48c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:47:59 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd00, 0x101bf) [ 2685.907039][T27822] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2686.140441][T27822] bond1212: entered promiscuous mode [ 2686.155188][T27822] 8021q: adding VLAN 0 to HW filter on device bond1212 19:48:00 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x79150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:00 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf00, 0x101bf) 19:48:00 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf, 0x101bf) 19:48:00 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = bpf$BPF_LINK_CREATE_XDP(0x1c, &(0x7f0000000000)={r0, 0x0, 0x25, 0x2}, 0x10) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) r5 = socket$netlink(0x10, 0x3, 0xb) sendmsg$BATADV_CMD_GET_GATEWAYS(r5, &(0x7f0000000380)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000340)={&(0x7f00000002c0)={0x44, 0x0, 0x1, 0x70bd29, 0x25dfdbff, {}, [@BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED={0x5, 0x2f, 0x1}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x5}, @BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xcd9f}, @BATADV_ATTR_VLANID={0x6}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000010}, 0x40) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) write$binfmt_script(r1, &(0x7f0000000240)=ANY=[@ANYRES16=r1, @ANYRESOCT=r1, @ANYRES8=r0, @ANYRES64, @ANYRESOCT=r1, @ANYRES64=r4, @ANYRES8=r0, @ANYRESHEX=r3], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r0, 0x0) r6 = socket$nl_generic(0x10, 0x3, 0x10) r7 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) r8 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r8, 0x8933, &(0x7f00000003c0)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_CHANNEL_SWITCH(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f00000000c0)=ANY=[@ANYBLOB="0100000314004a232a6250e1057a90377857baba3cdfbbf26274b5f01638be2ce8f29d6483e968c0662f9906cbb62d38b567bc1a810b29adad14087d5f3b2e3c7aff43237d14c52e13ea0d6d8bf12064c33b8fc6f8771c8cbf60fac37dc28fa75164af6a3afe708a6fd4dbceba866a0ecb606ac48001e0f028060a5a7672319f670c13b2a708117123a4f58a1b21cf0287f9acb18c16687dfe0b2a60c669512fe5567a0539004a9913a93e1af9cccf99015ce1e36f019924453f41189188fe33eb2568f31b8d52944458dc61", @ANYRES16=r7, 
@ANYBLOB="010000000000000000006600000008000300", @ANYRES32=r9, @ANYBLOB="08002600000000000800a70000020000"], 0x2c}}, 0x0) sendfile(r6, 0xffffffffffffffff, 0x0, 0x100004001) [ 2686.245978][T27823] bond1212: (slave bridge1114): making interface the new active one [ 2686.254970][T27823] bridge1114: entered promiscuous mode [ 2686.268362][T27823] bond1212: (slave bridge1114): Enslaving as an active interface with an up link 19:48:00 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x10, 0x101bf) [ 2686.420859][T27833] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:00 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1100, 0x101bf) 19:48:00 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1f00, 0x101bf) [ 2686.580871][T27833] bond1255: entered promiscuous mode [ 2686.622685][T27833] 8021q: adding VLAN 0 to HW filter on device bond1255 [ 2686.752890][T27834] bond1255: (slave bridge1184): making interface the new active one [ 2686.762313][T27834] bridge1184: entered promiscuous mode [ 2686.785904][T27834] bond1255: (slave bridge1184): Enslaving as an active interface with an up link 19:48:00 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7fe70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:00 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x11, 0x101bf) 19:48:00 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x2000, 0x101bf) [ 2686.830758][T27844] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2686.949502][T27844] bond1138: entered promiscuous mode [ 2686.956500][T27844] 8021q: adding VLAN 0 to HW filter on device bond1138 19:48:01 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf58c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:01 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x91, 0x101bf) [ 2687.065071][T27845] bond1138: (slave bridge1067): making interface the new active one [ 2687.077221][T27845] bridge1067: entered promiscuous mode [ 2687.090052][T27845] bond1138: (slave bridge1067): Enslaving as an active interface with an up link [ 2687.101022][T27856] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2687.250572][T27856] bond1213: entered promiscuous mode [ 2687.257614][T27856] 8021q: adding VLAN 0 to HW filter on device bond1213 19:48:01 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:01 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3f00, 0x101bf) 19:48:01 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = bpf$BPF_LINK_CREATE_XDP(0x1c, &(0x7f0000000000)={r0, 0x0, 0x25, 0x2}, 0x10) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) r5 = socket$netlink(0x10, 0x3, 0xb) sendmsg$BATADV_CMD_GET_GATEWAYS(r5, &(0x7f0000000380)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000340)={&(0x7f00000002c0)={0x44, 0x0, 0x1, 0x70bd29, 0x25dfdbff, {}, [@BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED={0x5, 0x2f, 0x1}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x5}, 
@BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xcd9f}, @BATADV_ATTR_VLANID={0x6}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000010}, 0x40) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) write$binfmt_script(r1, &(0x7f0000000240)=ANY=[@ANYRES16=r1, @ANYRESOCT=r1, @ANYRES8=r0, @ANYRES64, @ANYRESOCT=r1, @ANYRES64=r4, @ANYRES8=r0, @ANYRESHEX=r3], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r0, 0x0) r6 = socket$nl_generic(0x10, 0x3, 0x10) r7 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) r8 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r8, 0x8933, &(0x7f00000003c0)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_CHANNEL_SWITCH(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f00000000c0)=ANY=[@ANYBLOB="0100000314004a232a6250e1057a90377857baba3cdfbbf26274b5f01638be2ce8f29d6483e968c0662f9906cbb62d38b567bc1a810b29adad14087d5f3b2e3c7aff43237d14c52e13ea0d6d8bf12064c33b8fc6f8771c8cbf60fac37dc28fa75164af6a3afe708a6fd4dbceba866a0ecb606ac48001e0f028060a5a7672319f670c13b2a708117123a4f58a1b21cf0287f9acb18c16687dfe0b2a60c669512fe5567a0539004a9913a93e1af9cccf99015ce1e36f019924453f41189188fe33eb2568f31b8d52944458dc61", @ANYRES16=r7, @ANYBLOB="010000000000000000006600000008000300", @ANYRES32=r9, @ANYBLOB="08002600000000000800a70000020000"], 0x2c}}, 0x0) sendfile(r6, 0xffffffffffffffff, 0x0, 0x100004001) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) bpf$BPF_LINK_CREATE_XDP(0x1c, &(0x7f0000000000)={r0, 0x0, 0x25, 0x2}, 0x10) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) socket$netlink(0x10, 0x3, 0xb) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(r5, &(0x7f0000000380)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000340)={&(0x7f00000002c0)={0x44, 0x0, 0x1, 0x70bd29, 0x25dfdbff, {}, [@BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED={0x5, 0x2f, 0x1}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x5}, @BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xcd9f}, @BATADV_ATTR_VLANID={0x6}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000010}, 0x40) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) (async) write$binfmt_script(r1, &(0x7f0000000240)=ANY=[@ANYRES16=r1, @ANYRESOCT=r1, @ANYRES8=r0, @ANYRES64, @ANYRESOCT=r1, @ANYRES64=r4, @ANYRES8=r0, @ANYRESHEX=r3], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r0, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) ioctl$sock_SIOCGIFINDEX_80211(r8, 0x8933, &(0x7f00000003c0)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_CHANNEL_SWITCH(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, 
&(0x7f00000001c0)={&(0x7f00000000c0)=ANY=[@ANYBLOB="0100000314004a232a6250e1057a90377857baba3cdfbbf26274b5f01638be2ce8f29d6483e968c0662f9906cbb62d38b567bc1a810b29adad14087d5f3b2e3c7aff43237d14c52e13ea0d6d8bf12064c33b8fc6f8771c8cbf60fac37dc28fa75164af6a3afe708a6fd4dbceba866a0ecb606ac48001e0f028060a5a7672319f670c13b2a708117123a4f58a1b21cf0287f9acb18c16687dfe0b2a60c669512fe5567a0539004a9913a93e1af9cccf99015ce1e36f019924453f41189188fe33eb2568f31b8d52944458dc61", @ANYRES16=r7, @ANYBLOB="010000000000000000006600000008000300", @ANYRES32=r9, @ANYBLOB="08002600000000000800a70000020000"], 0x2c}}, 0x0) (async) sendfile(r6, 0xffffffffffffffff, 0x0, 0x100004001) (async) 19:48:01 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1bf, 0x101bf) [ 2687.326975][T27859] bond1213: (slave bridge1115): making interface the new active one [ 2687.335477][T27859] bridge1115: entered promiscuous mode [ 2687.348865][T27859] bond1213: (slave bridge1115): Enslaving as an active interface with an up link [ 2687.365042][T27873] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:01 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x300, 0x101bf) [ 2687.450326][T27873] bond1256: entered promiscuous mode [ 2687.456613][T27873] 8021q: adding VLAN 0 to HW filter on device bond1256 [ 2687.578279][T27875] bond1256: (slave bridge1185): making interface the new active one [ 2687.587957][T27875] bridge1185: entered promiscuous mode [ 2687.600948][T27875] bond1256: (slave bridge1185): Enslaving as an active interface with an up link 19:48:01 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8008, 0x101bf) 19:48:01 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x80110000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2687.660774][T27880] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:48:01 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x500, 0x101bf) [ 2687.787808][T27880] bond1139: entered promiscuous mode 19:48:01 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9100, 0x101bf) [ 2687.822795][T27880] 8021q: adding VLAN 0 to HW filter on device bond1139 19:48:01 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x600, 0x101bf) [ 2687.906758][T27881] bond1139: (slave bridge1068): making interface the new active one [ 2687.965234][T27881] bridge1068: entered promiscuous mode [ 2688.019605][T27881] bond1139: (slave bridge1068): Enslaving as an active interface with an up link [ 2688.079407][T27891] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:48:02 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf68c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:02 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xbf01, 0x101bf) [ 2688.209916][T27891] bond1214: entered promiscuous mode [ 2688.216271][T27891] 8021q: adding VLAN 0 to HW filter on device bond1214 19:48:02 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a010000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:02 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = bpf$BPF_LINK_CREATE_XDP(0x1c, &(0x7f0000000000)={r0, 0x0, 0x25, 0x2}, 0x10) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r2, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) socket$netlink(0x10, 0x3, 0xb) (async) r5 = socket$netlink(0x10, 
0x3, 0xb) sendmsg$BATADV_CMD_GET_GATEWAYS(r5, &(0x7f0000000380)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000340)={&(0x7f00000002c0)={0x44, 0x0, 0x1, 0x70bd29, 0x25dfdbff, {}, [@BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED={0x5, 0x2f, 0x1}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x5}, @BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xcd9f}, @BATADV_ATTR_VLANID={0x6}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000010}, 0x40) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(r5, &(0x7f0000000380)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x80000}, 0xc, &(0x7f0000000340)={&(0x7f00000002c0)={0x44, 0x0, 0x1, 0x70bd29, 0x25dfdbff, {}, [@BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED={0x5, 0x2f, 0x1}, @BATADV_ATTR_FRAGMENTATION_ENABLED={0x5}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x5}, @BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xcd9f}, @BATADV_ATTR_VLANID={0x6}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000010}, 0x40) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r2, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r3, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r4}, @void}}}, 0x1c}}, 0x0) write$binfmt_script(r1, &(0x7f0000000240)=ANY=[@ANYRES16=r1, @ANYRESOCT=r1, @ANYRES8=r0, @ANYRES64, @ANYRESOCT=r1, @ANYRES64=r4, @ANYRES8=r0, @ANYRESHEX=r3], 0x208e24b) (async) write$binfmt_script(r1, &(0x7f0000000240)=ANY=[@ANYRES16=r1, @ANYRESOCT=r1, @ANYRES8=r0, @ANYRES64, @ANYRESOCT=r1, @ANYRES64=r4, @ANYRES8=r0, @ANYRESHEX=r3], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r0, 0x0) r6 = socket$nl_generic(0x10, 0x3, 0x10) r7 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) (async) r8 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r8, 0x8933, &(0x7f00000003c0)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_CHANNEL_SWITCH(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f00000000c0)=ANY=[@ANYBLOB="0100000314004a232a6250e1057a90377857baba3cdfbbf26274b5f01638be2ce8f29d6483e968c0662f9906cbb62d38b567bc1a810b29adad14087d5f3b2e3c7aff43237d14c52e13ea0d6d8bf12064c33b8fc6f8771c8cbf60fac37dc28fa75164af6a3afe708a6fd4dbceba866a0ecb606ac48001e0f028060a5a7672319f670c13b2a708117123a4f58a1b21cf0287f9acb18c16687dfe0b2a60c669512fe5567a0539004a9913a93e1af9cccf99015ce1e36f019924453f41189188fe33eb2568f31b8d52944458dc61", @ANYRES16=r7, @ANYBLOB="010000000000000000006600000008000300", @ANYRES32=r9, @ANYBLOB="08002600000000000800a70000020000"], 0x2c}}, 0x0) (async) sendmsg$NL80211_CMD_CHANNEL_SWITCH(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f00000000c0)=ANY=[@ANYBLOB="0100000314004a232a6250e1057a90377857baba3cdfbbf26274b5f01638be2ce8f29d6483e968c0662f9906cbb62d38b567bc1a810b29adad14087d5f3b2e3c7aff43237d14c52e13ea0d6d8bf12064c33b8fc6f8771c8cbf60fac37dc28fa75164af6a3afe708a6fd4dbceba866a0ecb606ac48001e0f028060a5a7672319f670c13b2a708117123a4f58a1b21cf0287f9acb18c16687dfe0b2a60c669512fe5567a0539004a9913a93e1af9cccf99015ce1e36f019924453f41189188fe33eb2568f31b8d52944458dc61", @ANYRES16=r7, @ANYBLOB="010000000000000000006600000008000300", @ANYRES32=r9, @ANYBLOB="08002600000000000800a70000020000"], 0x2c}}, 0x0) sendfile(r6, 0xffffffffffffffff, 0x0, 0x100004001) 19:48:02 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x700, 0x101bf) [ 
2688.367217][T27893] bond1214: (slave bridge1116): making interface the new active one [ 2688.377344][T27893] bridge1116: entered promiscuous mode [ 2688.388801][T27893] bond1214: (slave bridge1116): Enslaving as an active interface with an up link 19:48:02 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x40000, 0x101bf) [ 2688.476310][T27905] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:02 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x900, 0x101bf) 19:48:02 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x100000, 0x101bf) [ 2688.630544][T27905] bond1257: entered promiscuous mode [ 2688.638228][T27905] 8021q: adding VLAN 0 to HW filter on device bond1257 19:48:02 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x80e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:02 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1000000, 0x101bf) 19:48:02 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa00, 0x101bf) [ 2688.881350][T27909] bond1257: (slave bridge1186): making interface the new active one [ 2688.891260][T27909] bridge1186: entered promiscuous mode [ 2688.906481][T27909] bond1257: (slave bridge1186): Enslaving as an active interface with an up link [ 2688.917674][T27915] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:48:03 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb00, 0x101bf) [ 2689.048428][T27915] bond1140: entered promiscuous mode [ 2689.054304][T27915] 8021q: adding VLAN 0 to HW filter on device bond1140 [ 2689.133084][T27917] bond1140: (slave bridge1069): making interface the new active one [ 2689.141606][T27917] bridge1069: entered promiscuous mode [ 2689.156262][T27917] bond1140: (slave bridge1069): Enslaving as an active interface with an up link 19:48:03 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x2000000, 0x101bf) 19:48:03 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf78c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2689.308127][T27929] bond1215: entered promiscuous mode [ 2689.316067][T27929] 8021q: adding VLAN 0 to HW filter on device bond1215 [ 2689.362841][T27931] bond1215: (slave bridge1117): making interface the new active one [ 2689.385552][T27931] bridge1117: entered promiscuous mode [ 2689.405757][T27931] bond1215: (slave bridge1117): Enslaving as an active interface with an up link 19:48:03 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:03 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf58c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:03 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc00, 0x101bf) 19:48:03 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3000000, 0x101bf) [ 2689.467794][T27943] bond1258: entered promiscuous mode [ 2689.475512][T27943] 8021q: adding VLAN 0 to HW filter on device bond1258 19:48:03 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x4000000, 0x101bf) 19:48:03 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd00, 0x101bf) [ 2689.803003][T27945] bond1258: (slave bridge1187): making interface the new active one [ 2689.813428][T27945] bridge1187: entered promiscuous mode [ 2689.826912][T27945] bond1258: (slave bridge1187): Enslaving as an active interface with an up link 19:48:03 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x81000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:03 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x5000000, 0x101bf) 19:48:03 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe00, 0x101bf) 19:48:04 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x6000000, 0x101bf) [ 2689.968575][T27952] bond1141: entered promiscuous mode [ 2689.974543][T27952] 8021q: adding VLAN 0 to HW filter on device bond1141 [ 2690.178066][T27955] bond1141: (slave bridge1070): making interface the new active one [ 2690.196536][T27955] bridge1070: entered promiscuous mode [ 2690.217134][T27955] bond1141: (slave bridge1070): Enslaving as an active interface with an up link 19:48:04 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x7000000, 0x101bf) 19:48:04 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, 
&(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf88c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2690.299246][T27958] bond12: entered promiscuous mode [ 2690.323430][T27958] 8021q: adding VLAN 0 to HW filter on device bond12 [ 2690.456650][T27963] bond12: (slave bridge9): making interface the new active one [ 2690.466425][T27963] bridge9: entered promiscuous mode [ 2690.483692][T27963] bond12: (slave bridge9): Enslaving as an active interface with an up link 19:48:04 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7b150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:04 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf00, 0x101bf) 19:48:04 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8000000, 0x101bf) 19:48:04 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf58c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2690.507090][T27966] workqueue: Failed to create a rescuer kthread for wq "bond1216": -EINTR [ 2690.692916][T27981] validate_nla: 5 callbacks suppressed [ 2690.692942][T27981] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:04 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1100, 0x101bf) 19:48:04 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8800000, 0x101bf) [ 2690.968727][T27981] bond1259: entered promiscuous mode [ 2691.008673][T27981] 8021q: adding VLAN 0 to HW filter on device bond1259 [ 2691.195610][T27982] bond1259: (slave bridge1188): making interface the new active one [ 2691.213363][T27982] bridge1188: entered promiscuous mode [ 2691.238620][T27982] bond1259: (slave bridge1188): Enslaving as an active interface with an up link [ 2691.277275][T27988] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:48:05 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x81e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:05 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1f00, 0x101bf) 19:48:05 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9000000, 0x101bf) 19:48:05 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3f00, 0x101bf) 19:48:05 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa000000, 0x101bf) [ 2691.302094][T27988] workqueue: Failed to create a rescuer kthread for wq "bond1142": -EINTR [ 2691.561352][T27996] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
19:48:05 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf98c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2691.656437][T27996] bond13: entered promiscuous mode [ 2691.662318][T27996] 8021q: adding VLAN 0 to HW filter on device bond13 [ 2691.729139][T28000] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2691.760493][T28000] workqueue: Failed to create a rescuer kthread for wq "bond1216": -EINTR [ 2691.907570][T28001] bond13: (slave bridge10): making interface the new active one [ 2691.953779][T28001] bridge10: entered promiscuous mode [ 2691.990857][T28001] bond13: (slave bridge10): Enslaving as an active interface with an up link 19:48:06 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7c150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8008, 0x101bf) 19:48:06 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb000000, 0x101bf) 19:48:06 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf58c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2692.126799][T28014] netlink: 'syz-executor.1': 
attribute type 1 has an invalid length. [ 2692.204343][T28014] bond1260: entered promiscuous mode [ 2692.210265][T28014] 8021q: adding VLAN 0 to HW filter on device bond1260 19:48:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9100, 0x101bf) [ 2692.277567][T28020] bond1260: (slave bridge1189): making interface the new active one [ 2692.313399][T28020] bridge1189: entered promiscuous mode 19:48:06 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc000000, 0x101bf) [ 2692.357406][T28020] bond1260: (slave bridge1189): Enslaving as an active interface with an up link [ 2692.391591][T28026] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:48:06 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x82020000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xbf01, 0x101bf) [ 2692.511005][T28026] bond1142: entered promiscuous mode [ 2692.523124][T28026] 8021q: adding VLAN 0 to HW filter on device bond1142 19:48:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x40000, 0x101bf) 19:48:06 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd000000, 0x101bf) [ 2692.689998][T28027] bond1142: (slave bridge1071): making interface the new active one [ 2692.699371][T28027] bridge1071: entered promiscuous mode [ 2692.716779][T28027] bond1142: (slave bridge1071): Enslaving as an active interface with an up link 19:48:06 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfa8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, 
r3}]}, 0x3c}}, 0x0) [ 2692.753083][T28032] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 19:48:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x4c5ce, 0x101bf) [ 2692.970561][T28032] bond14: entered promiscuous mode [ 2692.995847][T28032] 8021q: adding VLAN 0 to HW filter on device bond14 [ 2693.157182][T28036] bond14: (slave bridge11): making interface the new active one [ 2693.177969][T28036] bridge11: entered promiscuous mode [ 2693.207145][T28036] bond14: (slave bridge11): Enslaving as an active interface with an up link [ 2693.219318][T28038] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2693.251876][T28038] workqueue: Failed to create a rescuer kthread for wq "bond1216": -EINTR [ 2693.406611][T28046] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:07 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7d150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:07 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x100000, 0x101bf) 19:48:07 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe000000, 0x101bf) 19:48:07 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f0000000000)=ANY=[], 0x208e24b) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_devices(r2, 0x0, 0x2, 0x0) openat$cgroup_devices(0xffffffffffffffff, &(0x7f0000000080)='devices.deny\x00', 0x2, 0x0) mmap(&(0x7f00001c2000/0x3000)=nil, 0x3000, 0x2, 0x20010, r2, 0x9cbd2000) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) r3 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000800000000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$MAP_CREATE(0x0, &(0x7f0000000300)=@base={0x0, 0x1, 0x8001, 0x0, 0x0, 0xffffffffffffffff, 0x5, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x5, 0x3}, 0x48) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='memory.events\x00', 0x100002, 0x0) write$cgroup_type(r4, 0x0, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000480)='rcu_utilization\x00', r3}, 0x10) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) 
socket$kcm(0x2, 0x2, 0x73) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='memory.events\x00', 0x100002, 0x0) openat$cgroup_ro(r6, 0x0, 0x0, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000140)={&(0x7f0000000000)='ext4_fc_commit_stop\x00', r4}, 0x10) openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000004c0)='cpuacct.usage_percpu\x00', 0x0, 0x0) sendmsg$TIPC_NL_BEARER_ADD(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000000c0)={0x14, 0x0, 0x6fc8eaa3ef74a203}, 0x14}}, 0x0) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000380)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x7b26f21d6897895d}, 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x3c, 0x0, 0x205, 0x70bd27, 0x25dfdbfb, {}, [@BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xffffffc0}, @BATADV_ATTR_BONDING_ENABLED={0x5, 0x2d, 0x1}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0x6}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}]}, 0x3c}, 0x1, 0x0, 0x0, 0x8091}, 0x4000) r7 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r7, r5, 0x0, 0x10000a006) [ 2693.444262][T28046] workqueue: Failed to create a rescuer kthread for wq "bond1261": -EINTR [ 2693.581149][T28060] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:48:07 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x82e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:07 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x202000, 0x101bf) 19:48:07 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf000000, 0x101bf) [ 2693.762629][ T27] audit: type=1804 audit(1690919287.789:1874): pid=28070 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.3" name="/root/syzkaller-testdir4218298001/syzkaller.DTupMi/6737/cgroup.controllers" dev="sda1" ino=1966 res=1 errno=0 [ 2693.809123][T28060] bond1143: entered promiscuous mode [ 2693.815945][T28060] 8021q: adding VLAN 0 to HW filter on device bond1143 19:48:08 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1000000, 0x101bf) 19:48:08 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x10000000, 0x101bf) 19:48:08 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x2000000, 0x101bf) [ 2694.107127][T28061] bond1143: (slave bridge1072): making interface the new active one [ 2694.134580][T28061] bridge1072: entered promiscuous mode [ 2694.182429][T28061] bond1143: (slave bridge1072): Enslaving as an active interface with an up link 19:48:08 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfb8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:08 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x11000000, 0x101bf) [ 2694.464755][T28074] bond1216: entered promiscuous mode [ 2694.471373][T28074] 8021q: adding VLAN 0 to HW filter on device bond1216 19:48:08 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7e150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:08 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3000000, 0x101bf) 19:48:08 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1f000000, 0x101bf) 19:48:08 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f0000000000)=ANY=[], 0x208e24b) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_devices(r2, 0x0, 0x2, 0x0) openat$cgroup_devices(0xffffffffffffffff, &(0x7f0000000080)='devices.deny\x00', 0x2, 0x0) mmap(&(0x7f00001c2000/0x3000)=nil, 0x3000, 0x2, 0x20010, r2, 0x9cbd2000) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 
&(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000800000000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (async) r3 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000800000000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$MAP_CREATE(0x0, &(0x7f0000000300)=@base={0x0, 0x1, 0x8001, 0x0, 0x0, 0xffffffffffffffff, 0x5, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x5, 0x3}, 0x48) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='memory.events\x00', 0x100002, 0x0) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='memory.events\x00', 0x100002, 0x0) write$cgroup_type(r4, 0x0, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000480)='rcu_utilization\x00', r3}, 0x10) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000480)='rcu_utilization\x00', r3}, 0x10) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) socket$kcm(0x2, 0x2, 0x73) (async) socket$kcm(0x2, 0x2, 0x73) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='memory.events\x00', 0x100002, 0x0) openat$cgroup_ro(r6, 0x0, 0x0, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000140)={&(0x7f0000000000)='ext4_fc_commit_stop\x00', r4}, 0x10) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000140)={&(0x7f0000000000)='ext4_fc_commit_stop\x00', r4}, 0x10) openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000004c0)='cpuacct.usage_percpu\x00', 0x0, 0x0) sendmsg$TIPC_NL_BEARER_ADD(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000000c0)={0x14, 0x0, 0x6fc8eaa3ef74a203}, 0x14}}, 0x0) (async) sendmsg$TIPC_NL_BEARER_ADD(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000000c0)={0x14, 0x0, 0x6fc8eaa3ef74a203}, 0x14}}, 0x0) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000380)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x7b26f21d6897895d}, 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x3c, 0x0, 0x205, 0x70bd27, 0x25dfdbfb, {}, [@BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xffffffc0}, @BATADV_ATTR_BONDING_ENABLED={0x5, 0x2d, 0x1}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0x6}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}]}, 0x3c}, 0x1, 0x0, 0x0, 0x8091}, 0x4000) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000380)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x7b26f21d6897895d}, 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x3c, 0x0, 0x205, 0x70bd27, 0x25dfdbfb, {}, [@BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xffffffc0}, @BATADV_ATTR_BONDING_ENABLED={0x5, 0x2d, 0x1}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0x6}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}]}, 0x3c}, 0x1, 0x0, 0x0, 0x8091}, 0x4000) r7 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r7, r5, 0x0, 0x10000a006) [ 2694.606057][T28076] bond1216: (slave bridge1118): making interface the new active one [ 2694.615030][T28076] bridge1118: entered promiscuous mode [ 2694.630325][T28076] bond1216: (slave bridge1118): Enslaving as an active interface with an up link 19:48:09 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = 
socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x83e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:09 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x4000000, 0x101bf) 19:48:09 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x20000000, 0x101bf) 19:48:09 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f0000000000)=ANY=[], 0x208e24b) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_devices(r2, 0x0, 0x2, 0x0) openat$cgroup_devices(0xffffffffffffffff, &(0x7f0000000080)='devices.deny\x00', 0x2, 0x0) (async) mmap(&(0x7f00001c2000/0x3000)=nil, 0x3000, 0x2, 0x20010, r2, 0x9cbd2000) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) r3 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000800000000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (async) bpf$MAP_CREATE(0x0, &(0x7f0000000300)=@base={0x0, 0x1, 0x8001, 0x0, 0x0, 0xffffffffffffffff, 0x5, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x5, 0x3}, 0x48) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='memory.events\x00', 0x100002, 0x0) write$cgroup_type(r4, 0x0, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000480)='rcu_utilization\x00', r3}, 0x10) (async) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) (async) socket$kcm(0x2, 0x2, 0x73) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='memory.events\x00', 0x100002, 0x0) openat$cgroup_ro(r6, 0x0, 0x0, 0x0) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000140)={&(0x7f0000000000)='ext4_fc_commit_stop\x00', r4}, 0x10) (async) openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000004c0)='cpuacct.usage_percpu\x00', 0x0, 0x0) (async) sendmsg$TIPC_NL_BEARER_ADD(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000000c0)={0x14, 0x0, 0x6fc8eaa3ef74a203}, 0x14}}, 0x0) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000380)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x7b26f21d6897895d}, 0xc, &(0x7f00000002c0)={&(0x7f0000000280)={0x3c, 0x0, 0x205, 0x70bd27, 0x25dfdbfb, {}, [@BATADV_ATTR_THROUGHPUT_OVERRIDE={0x8, 0x3b, 0xffffffc0}, 
@BATADV_ATTR_BONDING_ENABLED={0x5, 0x2d, 0x1}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0x6}, @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5, 0x2e, 0x1}]}, 0x3c}, 0x1, 0x0, 0x0, 0x8091}, 0x4000) (async) r7 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r7, r5, 0x0, 0x10000a006) [ 2694.700665][T28078] workqueue: Failed to create a rescuer kthread for wq "bond1261": -EINTR [ 2695.065678][T28095] bond1144: entered promiscuous mode [ 2695.106113][T28095] 8021q: adding VLAN 0 to HW filter on device bond1144 [ 2695.189587][T28096] bond1144: (slave bridge1073): making interface the new active one [ 2695.204523][T28096] bridge1073: entered promiscuous mode [ 2695.220343][T28096] bond1144: (slave bridge1073): Enslaving as an active interface with an up link 19:48:09 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf000000, 0x101bf) 19:48:09 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x5000000, 0x101bf) 19:48:09 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfc8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:09 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3f000000, 0x101bf) [ 2695.379331][T28108] bond1217: entered promiscuous mode [ 2695.385929][T28108] 8021q: adding VLAN 0 to HW filter on device bond1217 [ 2695.480569][T28110] bond1217: (slave bridge1119): making interface the new active one [ 2695.489624][T28110] bridge1119: entered promiscuous mode [ 2695.504629][T28110] bond1217: (slave bridge1119): Enslaving as an active interface with an up link 19:48:09 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7f150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:09 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x91000000, 0x101bf) 19:48:09 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x6000000, 0x101bf) 19:48:09 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$nl_generic(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000200)=ANY=[@ANYBLOB="680100003a001000000000000000000018000000040000005001018040010600020035c92941a564caa2aabe83cb353caa32968115499b734e98adc4a03c0900acb107d4536cb2f95e64db2b585750dc75d9bc39f389f8167b0ee6ce9d784b21a89c1ef5b4dd4f4fdbcd365fb2fe2a80350000ad34050b7975ea652473f2d00372a855e0af70b092f1a1aedda79ad11c1bff97846b4c91def78be21984ae8298aa7c5a13a2af08ff68b2956a910bfa2117eb9bda3825e8273c7330ed4d2ac0e1c4090aad288d1dfcfb86776ff3f2d4ed1cf73968db8c26fedf1b5d2a4e859f495213e5f25d20517fd038020ed17f000000881b261bf0a17a60369efe698fa6128b80dd7e02f8eb6b13c512b3f32b685cb094310888fae49e034361da24ad99555b390a7a30fd93b105510a70a228997136ae8f6bda8722d1cffe785179ce812e354c8b22b24e4ae79784155da7fc4bda861d800000000000000000000c0006000000000000000000"], 0x168}}, 0x0) r1 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000040), r0) sendmsg$IPVS_CMD_FLUSH(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x3c, r1, 0x500, 0x70bd2d, 0x25dfdbfe, {}, [@IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x2}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x3ca}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x4}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x800}]}, 0x3c}, 0x1, 0x0, 0x0, 0x84}, 0x40000) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000180)={r0}) sendmsg$nl_route(r2, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)=@ipv4_getnetconf={0x3c, 0x52, 0x300, 0x70bd2d, 0x25dfdbfb, {}, [@NETCONFA_RP_FILTER={0x8, 0x3, 0x7fff}, @NETCONFA_RP_FILTER={0x8, 0x3, 0x7f}, @NETCONFA_FORWARDING={0x8, 0x2, 0xffffffff}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x2}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x5}]}, 0x3c}, 0x1, 0x0, 0x0, 0x40040}, 0x4000045) [ 2695.606280][T28125] bond1261: entered promiscuous mode [ 2695.614383][T28125] 8021q: adding VLAN 0 to HW filter on device bond1261 [ 2695.818231][T28128] bond1261: (slave bridge1190): making interface the new active one [ 2695.827716][T28128] bridge1190: entered promiscuous mode [ 2695.846443][T28128] bond1261: (slave bridge1190): Enslaving as an active interface with an up link 19:48:09 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x84010000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:09 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 
0x10) sendmsg$nl_generic(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000200)=ANY=[@ANYBLOB="680100003a001000000000000000000018000000040000005001018040010600020035c92941a564caa2aabe83cb353caa32968115499b734e98adc4a03c0900acb107d4536cb2f95e64db2b585750dc75d9bc39f389f8167b0ee6ce9d784b21a89c1ef5b4dd4f4fdbcd365fb2fe2a80350000ad34050b7975ea652473f2d00372a855e0af70b092f1a1aedda79ad11c1bff97846b4c91def78be21984ae8298aa7c5a13a2af08ff68b2956a910bfa2117eb9bda3825e8273c7330ed4d2ac0e1c4090aad288d1dfcfb86776ff3f2d4ed1cf73968db8c26fedf1b5d2a4e859f495213e5f25d20517fd038020ed17f000000881b261bf0a17a60369efe698fa6128b80dd7e02f8eb6b13c512b3f32b685cb094310888fae49e034361da24ad99555b390a7a30fd93b105510a70a228997136ae8f6bda8722d1cffe785179ce812e354c8b22b24e4ae79784155da7fc4bda861d800000000000000000000c0006000000000000000000"], 0x168}}, 0x0) (async) r1 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000040), r0) sendmsg$IPVS_CMD_FLUSH(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x3c, r1, 0x500, 0x70bd2d, 0x25dfdbfe, {}, [@IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x2}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x3ca}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x4}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x800}]}, 0x3c}, 0x1, 0x0, 0x0, 0x84}, 0x40000) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000180)={r0}) sendmsg$nl_route(r2, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)=@ipv4_getnetconf={0x3c, 0x52, 0x300, 0x70bd2d, 0x25dfdbfb, {}, [@NETCONFA_RP_FILTER={0x8, 0x3, 0x7fff}, @NETCONFA_RP_FILTER={0x8, 0x3, 0x7f}, @NETCONFA_FORWARDING={0x8, 0x2, 0xffffffff}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x2}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x5}]}, 0x3c}, 0x1, 0x0, 0x0, 0x40040}, 0x4000045) 19:48:09 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xbf010000, 0x101bf) 19:48:09 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x7000000, 0x101bf) [ 2695.876999][T28137] validate_nla: 5 callbacks suppressed [ 2695.877022][T28137] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:48:10 executing program 3: r0 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$nl_generic(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000200)=ANY=[@ANYBLOB="680100003a001000000000000000000018000000040000005001018040010600020035c92941a564caa2aabe83cb353caa32968115499b734e98adc4a03c0900acb107d4536cb2f95e64db2b585750dc75d9bc39f389f8167b0ee6ce9d784b21a89c1ef5b4dd4f4fdbcd365fb2fe2a80350000ad34050b7975ea652473f2d00372a855e0af70b092f1a1aedda79ad11c1bff97846b4c91def78be21984ae8298aa7c5a13a2af08ff68b2956a910bfa2117eb9bda3825e8273c7330ed4d2ac0e1c4090aad288d1dfcfb86776ff3f2d4ed1cf73968db8c26fedf1b5d2a4e859f495213e5f25d20517fd038020ed17f000000881b261bf0a17a60369efe698fa6128b80dd7e02f8eb6b13c512b3f32b685cb094310888fae49e034361da24ad99555b390a7a30fd93b105510a70a228997136ae8f6bda8722d1cffe785179ce812e354c8b22b24e4ae79784155da7fc4bda861d800000000000000000000c0006000000000000000000"], 0x168}}, 0x0) (async) r1 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000040), r0) sendmsg$IPVS_CMD_FLUSH(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x3c, r1, 0x500, 0x70bd2d, 0x25dfdbfe, {}, [@IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x2}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x3ca}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x4}, @IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x800}]}, 0x3c}, 0x1, 0x0, 0x0, 0x84}, 0x40000) ioctl$sock_kcm_SIOCKCMCLONE(0xffffffffffffffff, 0x89e2, &(0x7f0000000180)={r0}) sendmsg$nl_route(r2, &(0x7f0000000440)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000400)={&(0x7f00000003c0)=@ipv4_getnetconf={0x3c, 0x52, 0x300, 0x70bd2d, 0x25dfdbfb, {}, [@NETCONFA_RP_FILTER={0x8, 0x3, 0x7fff}, @NETCONFA_RP_FILTER={0x8, 0x3, 0x7f}, @NETCONFA_FORWARDING={0x8, 0x2, 0xffffffff}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x2}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x5}]}, 0x3c}, 0x1, 0x0, 0x0, 0x40040}, 0x4000045) 19:48:10 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf4ffffff, 0x101bf) [ 2696.202051][T28137] bond1145: entered promiscuous mode [ 2696.231734][T28137] 8021q: adding VLAN 0 to HW filter on device bond1145 [ 2696.376312][T28138] bond1145: (slave bridge1074): making interface the new active one [ 2696.386549][T28138] bridge1074: entered promiscuous mode [ 2696.410667][T28138] bond1145: (slave bridge1074): Enslaving as an active interface with an up link 19:48:10 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfd8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:10 executing program 3: r0 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_EXP_GET(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f00000001c0)={&(0x7f0000000280)=ANY=[@ANYBLOB="500000000102010400000000000000000a0000003c0001800c000280040001000300fe8000000000000000000000000000aa1400040000000000000000000000ffffe000000100"/80], 0x50}}, 0x0) sendmsg$NL80211_CMD_UPDATE_FT_IES(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="90000000", @ANYRES16=0x0, @ANYBLOB="10002dbd7000ffdbdf25600000006c002a008c187e0c34ac537eb5bc8fd0a65561f80085fce309b2c2a8523872060303030303030406e0320500fbff0301067e1502810008021100000108000000010000faffffff750801000000000001008c18a70ac6e8e2e13cce1d0c0c9c300b2c317a4f9e099fc08bb90600b1008a0e00000600b1000002000000"], 0x90}, 0x1, 0x0, 0x0, 0x10}, 0x4008000) [ 2696.428169][T28148] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2696.510258][T28170] netlink: 44 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2696.577059][T28148] bond1218: entered promiscuous mode [ 2696.585947][T28148] 8021q: adding VLAN 0 to HW filter on device bond1218 [ 2696.687407][T28149] bond1218: (slave bridge1120): making interface the new active one [ 2696.697760][T28149] bridge1120: entered promiscuous mode [ 2696.712056][T28149] bond1218: (slave bridge1120): Enslaving as an active interface with an up link [ 2696.727205][T28159] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:10 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x80110000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:10 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8000000, 0x101bf) 19:48:10 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfbffffff, 0x101bf) 19:48:10 executing program 3: r0 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_EXP_GET(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000280)=ANY=[@ANYBLOB="500000000102010400000000000000000a0000003c0001800c000280040001000300fe8000000000000000000000000000aa1400040000000000000000000000ffffe000000100"/80], 0x50}}, 0x0) sendmsg$NL80211_CMD_UPDATE_FT_IES(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="90000000", @ANYRES16=0x0, @ANYBLOB="10002dbd7000ffdbdf25600000006c002a008c187e0c34ac537eb5bc8fd0a65561f80085fce309b2c2a8523872060303030303030406e0320500fbff0301067e1502810008021100000108000000010000faffffff750801000000000001008c18a70ac6e8e2e13cce1d0c0c9c300b2c317a4f9e099fc08bb90600b1008a0e00000600b1000002000000"], 0x90}, 0x1, 0x0, 0x0, 0x10}, 0x4008000) [ 
2696.803795][T28179] netlink: 44 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2696.806885][T28159] bond1262: entered promiscuous mode [ 2696.821758][T28159] 8021q: adding VLAN 0 to HW filter on device bond1262 [ 2697.025170][T28161] bond1262: (slave bridge1191): making interface the new active one [ 2697.035326][T28161] bridge1191: entered promiscuous mode [ 2697.052494][T28161] bond1262: (slave bridge1191): Enslaving as an active interface with an up link 19:48:11 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8492958f, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:11 executing program 3: r0 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_EXP_GET(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000280)=ANY=[@ANYBLOB="500000000102010400000000000000000a0000003c0001800c000280040001000300fe8000000000000000000000000000aa1400040000000000000000000000ffffe000000100"/80], 0x50}}, 0x0) sendmsg$NL80211_CMD_UPDATE_FT_IES(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="90000000", @ANYRES16=0x0, @ANYBLOB="10002dbd7000ffdbdf25600000006c002a008c187e0c34ac537eb5bc8fd0a65561f80085fce309b2c2a8523872060303030303030406e0320500fbff0301067e1502810008021100000108000000010000faffffff750801000000000001008c18a70ac6e8e2e13cce1d0c0c9c300b2c317a4f9e099fc08bb90600b1008a0e00000600b1000002000000"], 0x90}, 0x1, 0x0, 0x0, 0x10}, 0x4008000) socket$nl_netfilter(0x10, 0x3, 0xc) (async) sendmsg$IPCTNL_MSG_EXP_GET(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000280)=ANY=[@ANYBLOB="500000000102010400000000000000000a0000003c0001800c000280040001000300fe8000000000000000000000000000aa1400040000000000000000000000ffffe000000100"/80], 0x50}}, 0x0) (async) sendmsg$NL80211_CMD_UPDATE_FT_IES(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="90000000", @ANYRES16=0x0, @ANYBLOB="10002dbd7000ffdbdf25600000006c002a008c187e0c34ac537eb5bc8fd0a65561f80085fce309b2c2a8523872060303030303030406e0320500fbff0301067e1502810008021100000108000000010000faffffff750801000000000001008c18a70ac6e8e2e13cce1d0c0c9c300b2c317a4f9e099fc08bb90600b1008a0e00000600b1000002000000"], 0x90}, 0x1, 0x0, 0x0, 0x10}, 0x4008000) (async) 19:48:11 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfeffffff, 0x101bf) 19:48:11 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x8800000, 0x101bf) [ 2697.160393][T28172] netlink: 'syz-executor.5': attribute type 1 has an invalid 
length. [ 2697.193010][T28191] netlink: 44 bytes leftover after parsing attributes in process `syz-executor.3'. 19:48:11 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffff7f, 0x101bf) 19:48:11 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9000000, 0x101bf) [ 2697.276260][T28195] netlink: 44 bytes leftover after parsing attributes in process `syz-executor.3'. [ 2697.405433][T28172] bond1146: entered promiscuous mode [ 2697.415583][T28172] 8021q: adding VLAN 0 to HW filter on device bond1146 [ 2697.584444][T28173] bond1146: (slave bridge1075): making interface the new active one [ 2697.592773][T28173] bridge1075: entered promiscuous mode [ 2697.617200][T28173] bond1146: (slave bridge1075): Enslaving as an active interface with an up link 19:48:11 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfe8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:11 executing program 3: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r0, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c000000100001eeffffffff0000000100000000", @ANYRES32=r2, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000002c0)=ANY=[@ANYBLOB="4800000024000b0e0008f7ffffffffffffff0000", @ANYRES32=r2, @ANYBLOB="00000000ffffffff0000000008000100636271001c00020018000100000000924e320a1545bfe00000000000000000"], 0x48}}, 0x0) unshare(0x6c060000) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) r3 = socket$packet(0x11, 0x3, 0x300) r4 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r4, &(0x7f0000000380)={0x0, 0x0, &(0x7f0000000340)={0x0, 0x128}}, 0x0) getsockname$packet(r4, &(0x7f0000000180)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) ioctl$sock_SIOCGIFVLAN_GET_VLAN_EGRESS_PRIORITY_CMD(r1, 0x8982, &(0x7f00000003c0)) bind$packet(r3, &(0x7f00000000c0)={0x11, 0x0, r5, 0x1, 0x0, 0x6, @broadcast}, 0x14) sendto$inet6(r3, &(0x7f0000000100)="0503460008003e00000002000800", 0x36, 0x0, 0x0, 0x0) r6 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r6, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) 
ioctl$SIOCX25GCAUSEDIAG(r6, 0x89e6, &(0x7f0000000240)={0x7}) [ 2697.655750][T28183] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2697.735113][T28183] bond1219: entered promiscuous mode [ 2697.741093][T28183] 8021q: adding VLAN 0 to HW filter on device bond1219 [ 2697.899606][T28184] bond1219: (slave bridge1121): making interface the new active one [ 2697.918707][T28184] bridge1121: entered promiscuous mode 19:48:12 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x80150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:12 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfffffff4, 0x101bf) 19:48:12 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa000000, 0x101bf) [ 2697.962245][T28184] bond1219: (slave bridge1121): Enslaving as an active interface with an up link [ 2697.992839][T28196] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:12 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb000000, 0x101bf) [ 2698.253912][T28196] bond1263: entered promiscuous mode [ 2698.260408][T28196] 8021q: adding VLAN 0 to HW filter on device bond1263 [ 2698.446918][T28198] bond1263: (slave bridge1192): making interface the new active one [ 2698.469400][T28198] bridge1192: entered promiscuous mode [ 2698.506755][T28198] bond1263: (slave bridge1192): Enslaving as an active interface with an up link 19:48:12 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x84e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:12 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfffffffb, 0x101bf) 19:48:12 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc000000, 0x101bf) [ 2698.574308][T28206] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. 19:48:12 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd000000, 0x101bf) 19:48:12 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfffffffe, 0x101bf) [ 2698.691037][T28209] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2698.933726][T28209] bond1147: entered promiscuous mode 19:48:13 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe000000, 0x101bf) [ 2698.954908][T28209] 8021q: adding VLAN 0 to HW filter on device bond1147 [ 2699.140892][T28212] bond1147: (slave bridge1076): making interface the new active one [ 2699.151451][T28212] bridge1076: entered promiscuous mode [ 2699.166763][T28212] bond1147: (slave bridge1076): Enslaving as an active interface with an up link [ 2699.177368][T28221] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
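
The write$cgroup_int reproducers repeated above all share one shape: open the read-only accounting file 'cpuacct.usage_percpu_sys' with a fuzzer-chosen flag word and attempt to write an integer to it with an oversized length. A minimal plain-C sketch of that shape follows; it is an editorial illustration, not part of the captured log. The mount path and the textual rendering of the value are assumptions (syzkaller opens the file relative to its private test cgroup).

/* Editorial sketch: approximate plain-C form of the write$cgroup_int
 * reproducer repeated above.  The path is a hypothetical stand-in. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    /* 0xffffffffffffff9c in the log is AT_FDCWD; 0x275a is the raw flag
     * word chosen by the fuzzer, passed through unchanged here. */
    int fd = openat(AT_FDCWD,
                    "/sys/fs/cgroup/cpuacct/cpuacct.usage_percpu_sys",
                    0x275a, 0);
    if (fd < 0) {
        perror("openat");
        return 1;
    }
    /* The log writes values such as 0xe000000 with length 0x101bf; the
     * value is assumed to be serialized as decimal text, and a sane
     * length is used instead of the oversized one. */
    const char val[] = "234881024";              /* 0xe000000 */
    if (write(fd, val, strlen(val)) < 0)
        perror("write");
    close(fd);
    return 0;
}
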
19:48:13 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfeff0000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2699.221701][T28221] workqueue: Failed to create a rescuer kthread for wq "bond1220": -EINTR [ 2699.372103][T28233] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2699.430271][T28233] bond1264: entered promiscuous mode [ 2699.436607][T28233] 8021q: adding VLAN 0 to HW filter on device bond1264 [ 2699.496366][T28234] bond1264: (slave bridge1193): making interface the new active one [ 2699.504917][T28234] bridge1193: entered promiscuous mode [ 2699.517711][T28234] bond1264: (slave bridge1193): Enslaving as an active interface with an up link [ 2699.562166][T28245] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2699.610675][T28245] bond1148: entered promiscuous mode [ 2699.616570][T28245] 8021q: adding VLAN 0 to HW filter on device bond1148 [ 2699.680226][T28246] bond1148: (slave bridge1077): making interface the new active one [ 2699.688483][T28246] bridge1077: entered promiscuous mode [ 2699.699989][T28246] bond1148: (slave bridge1077): Enslaving as an active interface with an up link 19:48:16 executing program 3: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r0, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c000000100001eeffffffff0000000100000000", @ANYRES32=r2, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000002c0)=ANY=[@ANYBLOB="4800000024000b0e0008f7ffffffffffffff0000", @ANYRES32=r2, @ANYBLOB="00000000ffffffff0000000008000100636271001c00020018000100000000924e320a1545bfe00000000000000000"], 0x48}}, 0x0) unshare(0x6c060000) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) r3 = socket$packet(0x11, 0x3, 0x300) r4 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r4, &(0x7f0000000380)={0x0, 0x0, &(0x7f0000000340)={0x0, 0x128}}, 0x0) getsockname$packet(r4, &(0x7f0000000180)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) ioctl$sock_SIOCGIFVLAN_GET_VLAN_EGRESS_PRIORITY_CMD(r1, 0x8982, &(0x7f00000003c0)) bind$packet(r3, &(0x7f00000000c0)={0x11, 0x0, r5, 0x1, 0x0, 0x6, @broadcast}, 0x14) sendto$inet6(r3, &(0x7f0000000100)="0503460008003e00000002000800", 0x36, 0x0, 0x0, 0x0) r6 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r6, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, 
@broadcast}, &(0x7f0000000200)=0x14) ioctl$SIOCX25GCAUSEDIAG(r6, 0x89e6, &(0x7f0000000240)={0x7}) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$BATADV_CMD_GET_MESH(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) (async) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) sendmsg$nl_route(r0, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c000000100001eeffffffff0000000100000000", @ANYRES32=r2, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) (async) sendmsg$nl_route_sched(r1, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000002c0)=ANY=[@ANYBLOB="4800000024000b0e0008f7ffffffffffffff0000", @ANYRES32=r2, @ANYBLOB="00000000ffffffff0000000008000100636271001c00020018000100000000924e320a1545bfe00000000000000000"], 0x48}}, 0x0) (async) unshare(0x6c060000) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) (async) socket$packet(0x11, 0x3, 0x300) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$nl_route_sched(r4, &(0x7f0000000380)={0x0, 0x0, &(0x7f0000000340)={0x0, 0x128}}, 0x0) (async) getsockname$packet(r4, &(0x7f0000000180)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) ioctl$sock_SIOCGIFVLAN_GET_VLAN_EGRESS_PRIORITY_CMD(r1, 0x8982, &(0x7f00000003c0)) (async) bind$packet(r3, &(0x7f00000000c0)={0x11, 0x0, r5, 0x1, 0x0, 0x6, @broadcast}, 0x14) (async) sendto$inet6(r3, &(0x7f0000000100)="0503460008003e00000002000800", 0x36, 0x0, 0x0, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$nl_route_sched(r6, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) (async) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) ioctl$SIOCX25GCAUSEDIAG(r6, 0x89e6, &(0x7f0000000240)={0x7}) (async) 19:48:16 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf000000, 0x101bf) 19:48:16 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffffff000, 0x101bf) 19:48:16 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x81000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:16 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) 
sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x85e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:16 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfeffffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:16 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x10000000, 0x101bf) 19:48:16 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x7fc98cc7cae9, 0x101bf) [ 2702.118187][T28258] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2702.228884][T28258] bond1220: entered promiscuous mode [ 2702.241447][T28258] 8021q: adding VLAN 0 to HW filter on device bond1220 19:48:16 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x11000000, 0x101bf) 19:48:16 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x800800000000, 0x101bf) [ 2702.277652][T28254] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:16 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x4000000000000, 0x101bf) [ 2702.428699][T28254] bond1265: entered promiscuous mode [ 2702.447177][T28254] 8021q: adding VLAN 0 to HW filter on device bond1265 [ 2702.462909][T28260] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
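
The netlink reproducers in this stretch (programs 1, 4 and 5) differ only in one randomized field: each resolves an interface index and then sends an RTM_NEWLINK request carrying IFLA_LINKINFO and IFLA_MASTER, which is what drives the bondNNNN/bridgeNNNN "Enslaving as an active interface" messages in between. Below is a stripped-down C sketch of the IFLA_MASTER step only, offered as an editorial illustration: it omits the nested bridge IFLA_LINKINFO and the randomized change flags, the device names are hypothetical, and the index lookup uses if_nametoindex() rather than the getsockname$packet trick the fuzzer relies on.

/* Editorial sketch: a minimal RTM_NEWLINK request that sets IFLA_MASTER
 * on an existing device, roughly the enslave step performed by the
 * reproducers above.  "bridge0" and "bond0" are hypothetical names. */
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct {
        struct nlmsghdr  nh;
        struct ifinfomsg ifi;
        char             attrs[16];
    } req;
    memset(&req, 0, sizeof(req));

    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
    if (fd < 0) { perror("socket"); return 1; }

    unsigned int slave  = if_nametoindex("bridge0");   /* device to enslave */
    unsigned int master = if_nametoindex("bond0");     /* bonding master */
    if (!slave || !master) { fprintf(stderr, "test devices missing\n"); return 1; }

    req.nh.nlmsg_type  = RTM_NEWLINK;
    req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
    req.ifi.ifi_family = AF_UNSPEC;
    req.ifi.ifi_index  = slave;

    /* Single attribute: IFLA_MASTER carrying the master's ifindex. */
    struct rtattr *rta = (struct rtattr *)req.attrs;
    rta->rta_type = IFLA_MASTER;
    rta->rta_len  = RTA_LENGTH(sizeof(master));
    memcpy(RTA_DATA(rta), &master, sizeof(master));

    req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifi)) + RTA_ALIGN(rta->rta_len);

    struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
    if (sendto(fd, &req, req.nh.nlmsg_len, 0,
               (struct sockaddr *)&dst, sizeof(dst)) < 0)
        perror("sendto");
    close(fd);
    return 0;
}
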
19:48:16 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1f000000, 0x101bf) [ 2702.618241][T28260] bond1149: entered promiscuous mode [ 2702.626037][T28260] 8021q: adding VLAN 0 to HW filter on device bond1149 [ 2702.712704][T28263] bond1220: (slave bridge1122): making interface the new active one [ 2702.721445][T28263] bridge1122: entered promiscuous mode [ 2702.737484][T28263] bond1220: (slave bridge1122): Enslaving as an active interface with an up link [ 2702.818703][T28264] bond1265: (slave bridge1194): making interface the new active one [ 2702.827665][T28264] bridge1194: entered promiscuous mode [ 2702.843085][T28264] bond1265: (slave bridge1194): Enslaving as an active interface with an up link [ 2702.944644][T28265] bond1149: (slave bridge1078): making interface the new active one [ 2702.954736][T28265] bridge1078: entered promiscuous mode [ 2702.968859][T28265] bond1149: (slave bridge1078): Enslaving as an active interface with an up link [ 2702.979517][T28268] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. 19:48:20 executing program 3: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$BATADV_CMD_GET_MESH(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) (async) sendmsg$BATADV_CMD_GET_MESH(r1, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000480)={0x0, 0x92}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r0, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c000000100001eeffffffff0000000100000000", @ANYRES32=r2, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000002c0)=ANY=[@ANYBLOB="4800000024000b0e0008f7ffffffffffffff0000", @ANYRES32=r2, @ANYBLOB="00000000ffffffff0000000008000100636271001c00020018000100000000924e320a1545bfe00000000000000000"], 0x48}}, 0x0) unshare(0x6c060000) (async) unshare(0x6c060000) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) r3 = socket$packet(0x11, 0x3, 0x300) r4 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r4, &(0x7f0000000380)={0x0, 0x0, &(0x7f0000000340)={0x0, 0x128}}, 0x0) getsockname$packet(r4, &(0x7f0000000180)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) ioctl$sock_SIOCGIFVLAN_GET_VLAN_EGRESS_PRIORITY_CMD(r1, 0x8982, &(0x7f00000003c0)) bind$packet(r3, &(0x7f00000000c0)={0x11, 0x0, r5, 0x1, 0x0, 0x6, @broadcast}, 0x14) (async) bind$packet(r3, &(0x7f00000000c0)={0x11, 0x0, r5, 0x1, 0x0, 0x6, @broadcast}, 0x14) sendto$inet6(r3, &(0x7f0000000100)="0503460008003e00000002000800", 0x36, 0x0, 0x0, 0x0) socket(0x10, 0x803, 0x0) (async) r6 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r6, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) ioctl$SIOCX25GCAUSEDIAG(r6, 0x89e6, &(0x7f0000000240)={0x7}) 19:48:20 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) 
write$cgroup_int(r0, &(0x7f0000000380)=0x10000000000000, 0x101bf) 19:48:20 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3f000000, 0x101bf) 19:48:20 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x81150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:20 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x86e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:20 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xff7f0000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2706.512382][T28300] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:20 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf0ffffff0f0000, 0x101bf) 19:48:20 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x91000000, 0x101bf) 19:48:20 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x100000000000000, 0x101bf) [ 2706.726119][T28300] bond1266: entered promiscuous mode 19:48:20 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x200000000000000, 0x101bf) 19:48:20 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xbf010000, 0x101bf) [ 2706.759785][T28300] 8021q: adding VLAN 0 to HW filter on device bond1266 19:48:20 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x300000000000000, 0x101bf) [ 2706.815496][T28302] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2706.961354][T28302] bond1221: entered promiscuous mode [ 2706.980369][T28302] 8021q: adding VLAN 0 to HW filter on device bond1221 [ 2707.006180][T28303] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2707.106567][T28303] bond1150: entered promiscuous mode [ 2707.121820][T28303] 8021q: adding VLAN 0 to HW filter on device bond1150 [ 2707.138915][T28306] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. 
[ 2707.237092][T28308] bond1266: (slave bridge1195): making interface the new active one [ 2707.246365][T28308] bridge1195: entered promiscuous mode [ 2707.260540][T28308] bond1266: (slave bridge1195): Enslaving as an active interface with an up link [ 2707.369797][T28311] bond1150: (slave bridge1079): making interface the new active one [ 2707.381585][T28311] bridge1079: entered promiscuous mode [ 2707.399806][T28311] bond1150: (slave bridge1079): Enslaving as an active interface with an up link [ 2707.492574][T28312] bond1221: (slave bridge1123): making interface the new active one [ 2707.502472][T28312] bridge1123: entered promiscuous mode [ 2707.520039][T28312] bond1221: (slave bridge1123): Enslaving as an active interface with an up link 19:48:24 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) sendto$unix(r0, 0x0, 0x0, 0x4040094, &(0x7f0000000140)=@file={0x0, './file0\x00'}, 0x6e) 19:48:24 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xcec50400, 0x101bf) 19:48:24 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x400000000000000, 0x101bf) 19:48:24 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x87e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:24 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xff8c0800, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:24 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x82150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2710.097285][T28345] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:48:24 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf4ffffff, 0x101bf) 19:48:24 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x500000000000000, 0x101bf) 19:48:24 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f00000000c0)={0xffffffffffffffff, 0xffffffffffffffff}) sendto$unix(r0, 0x0, 0x0, 0xc800, 0x0, 0x0) 19:48:24 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfbffffff, 0x101bf) 19:48:24 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x600000000000000, 0x101bf) [ 2710.297991][T28345] bond1151: entered promiscuous mode [ 2710.317391][T28345] 8021q: adding VLAN 0 to HW filter on device bond1151 19:48:24 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfeffffff, 0x101bf) 19:48:24 executing program 3: socket(0x11, 0x803, 0x0) [ 2710.402829][T28342] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:24 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f0000000040)={0xffffffffffffffff}) recvmmsg$unix(r0, 0x0, 0x0, 0x40000021, 0x0) [ 2710.511835][T28342] bond1267: entered promiscuous mode 19:48:24 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f00000000c0)={0xffffffffffffffff, 0xffffffffffffffff}) sendto$unix(r0, 0x0, 0x0, 0x0, &(0x7f0000000140)=@file={0x0, './file0\x00'}, 0x6e) [ 2710.534040][T28342] 8021q: adding VLAN 0 to HW filter on device bond1267 [ 2710.619772][T28344] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2710.751833][T28344] bond1222: entered promiscuous mode [ 2710.758913][T28344] 8021q: adding VLAN 0 to HW filter on device bond1222 [ 2710.869539][T28349] bond1267: (slave bridge1196): making interface the new active one [ 2710.879024][T28349] bridge1196: entered promiscuous mode [ 2710.903098][T28349] bond1267: (slave bridge1196): Enslaving as an active interface with an up link 19:48:25 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x88a8ffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:25 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x700000000000000, 0x101bf) [ 2711.049223][T28348] bond1151: (slave bridge1080): making interface the new active one [ 2711.059600][T28348] bridge1080: entered promiscuous mode [ 2711.078525][T28348] bond1151: (slave bridge1080): Enslaving as an active interface with an up link [ 2711.211938][T28352] bond1222: (slave bridge1124): making interface the new active one [ 2711.225396][T28352] bridge1124: entered promiscuous mode [ 2711.248538][T28352] bond1222: (slave bridge1124): Enslaving as an active interface with an up link 19:48:25 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x83150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:25 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffff7f, 0x101bf) 19:48:25 executing program 3: r0 = socket$inet6_udp(0xa, 0x2, 0x0) setsockopt$inet6_IPV6_ADDRFORM(r0, 0x29, 0x1, 0xfffffffffffffffc, 0x0) 19:48:25 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffff0300, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:25 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x800000000000000, 0x101bf) [ 2711.304628][T28378] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:25 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfffffff4, 0x101bf) 19:48:25 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x900000000000000, 0x101bf) [ 2711.431764][T28378] bond1268: entered promiscuous mode [ 2711.447127][T28378] 8021q: adding VLAN 0 to HW filter on device bond1268 19:48:25 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfffffffb, 0x101bf) 19:48:25 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa00000000000000, 0x101bf) 19:48:25 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfffffffe, 0x101bf) [ 2711.679416][T28379] bond1268: (slave bridge1197): making interface the new active one [ 2711.727212][T28379] bridge1197: entered promiscuous mode [ 2711.786523][T28379] bond1268: (slave bridge1197): Enslaving as an active interface with an up link 19:48:25 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x88e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:25 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb00000000000000, 0x101bf) [ 2711.870390][T28385] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2711.999586][T28385] bond1152: entered promiscuous mode [ 2712.007098][T28385] 8021q: adding VLAN 0 to HW filter on device bond1152 [ 2712.137328][T28394] bond1152: (slave bridge1081): making interface the new active one [ 2712.147740][T28394] bridge1081: entered promiscuous mode [ 2712.162606][T28394] bond1152: (slave bridge1081): Enslaving as an active interface with an up link [ 2712.173000][T28395] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2712.251021][T28395] bond1223: entered promiscuous mode [ 2712.257540][T28395] 8021q: adding VLAN 0 to HW filter on device bond1223 [ 2712.357339][T28396] bond1223: (slave bridge1125): making interface the new active one [ 2712.367011][T28396] bridge1125: entered promiscuous mode [ 2712.386903][T28396] bond1223: (slave bridge1125): Enslaving as an active interface with an up link 19:48:26 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x84010000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:26 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffffff000, 0x101bf) 19:48:26 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc00000000000000, 0x101bf) 19:48:26 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) sendmmsg$unix(r0, &(0x7f0000004fc0), 0x0, 0x40) 19:48:26 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffffa888, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2712.421306][T28413] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:26 executing program 3: socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) recvmsg(r0, &(0x7f0000000440)={0x0, 0x0, 0x0}, 0x42) [ 2712.467421][T28413] bond1269: entered promiscuous mode [ 2712.479499][T28413] 8021q: adding VLAN 0 to HW filter on device bond1269 [ 2712.587327][T28415] bond1269: (slave bridge1198): making interface the new active one [ 2712.606550][T28415] bridge1198: entered promiscuous mode 19:48:26 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd00000000000000, 0x101bf) 19:48:26 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x7f8cd127cae9, 0x101bf) [ 2712.636226][T28415] bond1269: (slave bridge1198): Enslaving as an active interface with an up link [ 2712.671060][T28423] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:48:26 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) sendmmsg$inet(r0, 0x0, 0x0, 0x0) 19:48:26 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x89e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2712.748981][T28423] bond1153: entered promiscuous mode [ 2712.758490][T28423] 8021q: adding VLAN 0 to HW filter on device bond1153 19:48:26 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe00000000000000, 0x101bf) 19:48:26 executing program 3: socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) recvmsg(r0, &(0x7f0000000640)={0x0, 0x0, 0x0, 0x0, 0x0, 0xfffffffffffffd5a}, 0x0) [ 2713.024686][T28430] bond1153: (slave bridge1082): making interface the new active one [ 2713.053583][T28430] bridge1082: entered promiscuous mode [ 2713.097247][T28430] bond1153: (slave bridge1082): Enslaving as an active interface with an up link [ 2713.113104][T28431] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2713.201349][T28431] bond1224: entered promiscuous mode [ 2713.209305][T28431] 8021q: adding VLAN 0 to HW filter on device bond1224 [ 2713.278606][T28433] bond1224: (slave bridge1126): making interface the new active one [ 2713.286920][T28433] bridge1126: entered promiscuous mode [ 2713.304672][T28433] bond1224: (slave bridge1126): Enslaving as an active interface with an up link [ 2713.320579][T28443] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:27 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x84150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x800800000000, 0x101bf) 19:48:27 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf00000000000000, 0x101bf) 19:48:27 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfffff000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2713.416888][T28443] bond1270: entered promiscuous mode [ 2713.422672][T28443] 8021q: adding VLAN 0 to HW filter on device bond1270 19:48:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x4000000000000, 0x101bf) 19:48:27 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1000000000000000, 0x101bf) [ 2713.540818][T28444] bond1270: (slave bridge1199): making interface the new active one [ 2713.554848][T28444] bridge1199: entered promiscuous mode [ 2713.568902][T28444] bond1270: (slave bridge1199): Enslaving as an active interface with an up link 19:48:27 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8a600000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x10000000000000, 0x101bf) 19:48:27 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1100000000000000, 0x101bf) [ 2713.777649][T28456] bond1154: entered promiscuous mode [ 2713.795971][T28456] 8021q: adding VLAN 0 to HW filter on device bond1154 19:48:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x20200000000000, 0x101bf) 19:48:27 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1f00000000000000, 0x101bf) 19:48:27 executing program 3: socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000100)={0xffffffffffffffff, 0xffffffffffffffff}) setsockopt$sock_void(r0, 0x1, 0x1b, 0x0, 0x0) [ 2714.116514][T28459] bond1154: (slave bridge1083): making interface the new active one [ 2714.125483][T28459] bridge1083: entered promiscuous mode [ 2714.139767][T28459] bond1154: (slave bridge1083): Enslaving as an active interface with an up link [ 2714.211023][T28461] bond1225: entered promiscuous mode [ 2714.222285][T28461] 8021q: adding VLAN 0 to HW filter on device bond1225 19:48:28 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x85150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:28 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf0ffffff0f0000, 0x101bf) 19:48:28 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x2000000000000000, 0x101bf) 19:48:28 executing program 3: socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000100)={0xffffffffffffffff, 0xffffffffffffffff}) recvmsg(r0, &(0x7f0000000640)={0x0, 0x0, 0x0}, 0x40000160) 19:48:28 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffffff7f, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2714.300036][T28462] bond1225: (slave bridge1127): making interface the new active one [ 2714.308699][T28462] bridge1127: entered promiscuous mode [ 2714.322413][T28462] bond1225: (slave bridge1127): Enslaving as an active interface with an up link 19:48:28 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x100000000000000, 0x101bf) [ 2714.501749][T28471] bond1271: entered promiscuous mode [ 2714.524380][T28471] 8021q: adding VLAN 0 to HW filter on device bond1271 [ 2714.648095][T28473] bond1271: (slave bridge1200): making interface the new active one [ 2714.665965][T28473] bridge1200: entered promiscuous mode 19:48:28 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8ae70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:28 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) getsockopt$sock_int(r0, 0x1, 0xb, &(0x7f0000001200), &(0x7f0000001240)=0x4) 19:48:28 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3f00000000000000, 0x101bf) 19:48:28 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x200000000000000, 0x101bf) [ 2714.690209][T28473] bond1271: (slave bridge1200): Enslaving as an active interface with an up link 19:48:28 executing program 3: r0 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r0, &(0x7f0000000380)={0x0, 0x0, &(0x7f0000000340)={&(0x7f00000001c0)=@mpls_getnetconf={0x14, 0x52, 0x1}, 0x14}}, 0x0) 19:48:28 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9100000000000000, 0x101bf) [ 2714.939017][T28492] bond1155: entered promiscuous mode [ 2714.956496][T28492] 8021q: adding VLAN 0 to HW filter on device bond1155 [ 2715.065287][T28498] bond1155: (slave bridge1084): making interface the new active one [ 2715.077932][T28498] bridge1084: entered promiscuous mode [ 2715.093406][T28498] bond1155: (slave bridge1084): Enslaving as an active interface with an up link [ 2715.109276][T28496] validate_nla: 
4 callbacks suppressed [ 2715.109301][T28496] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2715.204731][T28496] bond1226: entered promiscuous mode [ 2715.210728][T28496] 8021q: adding VLAN 0 to HW filter on device bond1226 [ 2715.300834][T28500] bond1226: (slave bridge1128): making interface the new active one [ 2715.311636][T28500] bridge1128: entered promiscuous mode [ 2715.334421][T28500] bond1226: (slave bridge1128): Enslaving as an active interface with an up link 19:48:29 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x86150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:29 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x300000000000000, 0x101bf) 19:48:29 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xbf01000000000000, 0x101bf) 19:48:29 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffffff9e, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2715.365909][T28511] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2715.499335][T28511] bond1272: entered promiscuous mode [ 2715.505619][T28511] 8021q: adding VLAN 0 to HW filter on device bond1272 19:48:29 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe9cac78cc97f0000, 0x101bf) 19:48:29 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x400000000000000, 0x101bf) [ 2715.615383][T28517] bond1272: (slave bridge1201): making interface the new active one [ 2715.625890][T28517] bridge1201: entered promiscuous mode [ 2715.639193][T28517] bond1272: (slave bridge1201): Enslaving as an active interface with an up link 19:48:29 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8be70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:29 executing program 3: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='net_prio.prioidx\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000400)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r0, 0x0) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r3, @ANYBLOB="01e5ff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001505050505050"], 0x448}}, 0x0) preadv(r0, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f0000000240)=ANY=[], 0x3af4701e) sendfile(r5, r4, 0x0, 0x10000a006) [ 2715.734014][T28525] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:48:29 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf4ffffff00000000, 0x101bf) 19:48:29 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfbffffff00000000, 0x101bf) 19:48:30 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x500000000000000, 0x101bf) [ 2715.926728][T28525] bond1156: entered promiscuous mode [ 2715.932469][T28525] 8021q: adding VLAN 0 to HW filter on device bond1156 19:48:30 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfeffffff00000000, 0x101bf) [ 2715.995865][ T27] audit: type=1804 audit(1690919310.029:1875): pid=28537 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.3" name="/root/syzkaller-testdir4218298001/syzkaller.DTupMi/6764/cgroup.controllers" dev="sda1" ino=1954 res=1 errno=0 [ 2716.006244][T28528] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2716.074786][ T27] audit: type=1804 audit(1690919310.029:1876): pid=28537 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=ToMToU comm="syz-executor.3" name="/root/syzkaller-testdir4218298001/syzkaller.DTupMi/6764/cgroup.controllers" dev="sda1" ino=1954 res=1 errno=0 [ 2716.176793][T28528] bond1227: entered promiscuous mode [ 2716.182539][T28528] 8021q: adding VLAN 0 to HW filter on device bond1227 [ 2716.236654][T28529] bond1156: (slave bridge1085): making interface the new active one [ 2716.245068][T28529] bridge1085: entered promiscuous mode [ 2716.258947][T28529] bond1156: (slave bridge1085): Enslaving as an active interface with an up link [ 2716.326791][T28530] bond1227: (slave bridge1129): making interface the new active one [ 2716.337051][T28530] bridge1129: entered promiscuous mode [ 2716.350112][T28530] bond1227: (slave bridge1129): Enslaving as an active interface with an up link [ 2716.361056][T28539] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:30 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x87150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:30 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffff7f00000000, 0x101bf) 19:48:30 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x600000000000000, 0x101bf) 19:48:30 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffffffa1, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2716.454656][T28539] bond1273: entered promiscuous mode [ 2716.460485][T28539] 8021q: adding VLAN 0 to HW filter on device bond1273 19:48:30 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x700000000000000, 0x101bf) 19:48:30 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffffff00000000, 0x101bf) [ 2716.610238][T28544] bond1273: (slave bridge1202): making interface the new active one [ 2716.659680][T28544] bridge1202: entered promiscuous mode [ 2716.711456][T28544] bond1273: (slave bridge1202): Enslaving as an active interface with an up link [ 2716.750319][T28561] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:48:30 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x800000000000000, 0x101bf) 19:48:30 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x16}]}, &(0x7f0000000140)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:30 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8ce70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2716.851955][T28561] bond1157: entered promiscuous mode [ 2716.858698][T28561] 8021q: adding VLAN 0 to HW filter on device bond1157 [ 2716.875412][T28562] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:48:31 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x10c80) [ 2716.932519][T28562] bond1228: entered promiscuous mode [ 2716.938673][T28562] 8021q: adding VLAN 0 to HW filter on device bond1228 19:48:31 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x900000000000000, 0x101bf) 19:48:31 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0xfffffdef) [ 2717.119480][T28563] bond1157: (slave bridge1086): making interface the new active one [ 2717.147417][T28563] bridge1086: entered promiscuous mode [ 2717.186075][T28563] bond1157: (slave bridge1086): Enslaving as an active interface with an up link [ 2717.377596][T28565] bond1228: (slave bridge1130): making interface the new active one [ 2717.388404][T28565] bridge1130: entered promiscuous mode [ 2717.436122][T28565] bond1228: (slave bridge1130): Enslaving as an active interface with an up link [ 2717.481375][T28576] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:31 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x88150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:31 executing program 3: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='net_prio.prioidx\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000400)=ANY=[@ANYRES64], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r0, 0x0) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r3, @ANYBLOB="01e5ff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001"], 0x448}}, 0x0) preadv(r0, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) bpf$PROG_LOAD(0x5, &(0x7f0000000200)={0x0, 0xe, &(0x7f00000013c0)=ANY=[@ANYBLOB="b702000026000000bfa300000000000007030000007effff7a0af0fff8ffffff79a4f0ff00000000b7060000ffffffff2d6405000000000065060400010000050404000001007d60b7030000000000006a0a00fe000000008500000028000000b7000000000000009500000000000000496cf27fb6d2c643db7e2d5fb4b0936cdf827fb43a431ca711fcd0cdfa146ed3d09a6175037958e27106e225b7937f02008b5e5a076d83923dd29c034055b67dafe6c8dc525d78c07f34e4d5b3185b310efcfa89147a09000000f110026e6d2ef831ab7ea0c34f17e3ad6eecbb622003b538dfd8e012e79578e51bc53099e90f4580d760551b5b0a341a2d7cbdb9cd38bdb2ca8e050000003a14817ac61e4dd11183a13477bf7e060e3670ef0e789f65f1328d6704902cbe7bc04b82d2789cb132b8667c214733a18c8b6619f28d9961b626c57c2691208173656d60a17e3c184b751c51160fbcbbdb5b1e7be6148ba532e60a0ac346dfebd3"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f0000000240)=ANY=[], 0x3af4701e) sendfile(r5, r4, 0x0, 0x10000a006) 19:48:31 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xa00000000000000, 0x101bf) 19:48:31 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 
0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffffffc3, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2717.599023][T28576] bond1274: entered promiscuous mode [ 2717.651910][T28576] 8021q: adding VLAN 0 to HW filter on device bond1274 19:48:31 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xb00000000000000, 0x101bf) [ 2717.822242][ T27] audit: type=1804 audit(1690919311.849:1877): pid=28595 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.3" name="/root/syzkaller-testdir4218298001/syzkaller.DTupMi/6766/cgroup.controllers" dev="sda1" ino=1973 res=1 errno=0 [ 2717.879596][ T27] audit: type=1804 audit(1690919311.849:1878): pid=28595 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=ToMToU comm="syz-executor.3" name="/root/syzkaller-testdir4218298001/syzkaller.DTupMi/6766/cgroup.controllers" dev="sda1" ino=1973 res=1 errno=0 19:48:32 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xc00000000000000, 0x101bf) [ 2717.974109][T28580] bond1274: (slave bridge1203): making interface the new active one [ 2717.999462][T28580] bridge1203: entered promiscuous mode [ 2718.026062][T28580] bond1274: (slave bridge1203): Enslaving as an active interface with an up link [ 2718.069955][T28589] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 19:48:32 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8de70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:32 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0xfffffffffffffdef) 19:48:32 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xd00000000000000, 0x101bf) [ 2718.204558][T28589] bond1158: entered promiscuous mode [ 2718.211076][T28589] 8021q: adding VLAN 0 to HW filter on device bond1158 [ 2718.228489][T28593] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:48:32 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe00000000000000, 0x101bf) [ 2718.330268][T28593] bond1229: entered promiscuous mode [ 2718.362527][T28593] 8021q: adding VLAN 0 to HW filter on device bond1229 19:48:32 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf00000000000000, 0x101bf) [ 2718.558865][ T1232] ieee802154 phy0 wpan0: encryption failed: -22 [ 2718.566201][ T1232] ieee802154 phy1 wpan1: encryption failed: -22 19:48:32 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = openat$cgroup(r0, &(0x7f0000000000)='syz1\x00', 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) [ 2718.642266][T28596] bond1158: (slave bridge1087): making interface the new active one [ 2718.661001][T28596] bridge1087: entered promiscuous mode [ 2718.710871][T28596] bond1158: (slave bridge1087): Enslaving as an active interface with an up link [ 2718.848154][T28597] bond1229: (slave bridge1131): making interface the new active one [ 2718.860543][T28597] bridge1131: entered promiscuous mode [ 2718.879932][T28597] bond1229: (slave bridge1131): Enslaving as an active interface with an up link 19:48:33 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x88a8ffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:33 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = openat$cgroup(r0, &(0x7f0000000000)='syz1\x00', 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) 19:48:33 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1000000000000000, 0x101bf) 19:48:33 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffffffe4, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:33 executing program 3: r0 = socket$unix(0x1, 0x1, 0x0) getsockopt$sock_int(r0, 0x1, 0x10, &(0x7f0000001480), &(0x7f00000014c0)=0x4) [ 2719.037202][T28607] bond1275: entered promiscuous mode [ 2719.043020][T28607] 8021q: adding VLAN 0 to HW filter on device bond1275 19:48:33 executing program 3: socketpair$unix(0x1, 0x5, 0x0, &(0x7f0000000100)={0xffffffffffffffff}) getsockopt$sock_int(r0, 0x1, 0x4, &(0x7f0000000000), &(0x7f0000000040)=0x4) [ 2719.234862][T28622] bond1159: entered promiscuous mode [ 2719.240671][T28622] 8021q: adding VLAN 0 to HW filter on device bond1159 19:48:33 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8e000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:33 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1100000000000000, 0x101bf) 19:48:33 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = openat$cgroup(r0, &(0x7f0000000000)='syz1\x00', 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) 19:48:33 executing program 3: syz_emit_ethernet(0x36, &(0x7f0000000000)={@random="f3f83ac34982", @remote, @val, {@ipv4}}, 0x0) [ 2719.298064][T28611] bond1275: (slave bridge1204): making interface the new active one [ 2719.306366][T28611] bridge1204: entered promiscuous mode [ 2719.318939][T28611] bond1275: (slave bridge1204): Enslaving as an active interface with an up link 19:48:33 executing program 3: r0 = socket$inet_sctp(0x2, 0x5, 0x84) setsockopt$inet_sctp_SCTP_HMAC_IDENT(r0, 0x84, 0x14, &(0x7f0000000280)={0x1, [0x0]}, 0x6) 19:48:33 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r2, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r3, @ANYBLOB], 0x1c}}, 0x0) 
mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x1000008, 0x80010, r1, 0xdd5ff000)
[ 2719.568756][T28633] bond1230: entered promiscuous mode
[ 2719.610732][T28633] 8021q: adding VLAN 0 to HW filter on device bond1230
[ 2719.710314][T28632] bond1159: (slave bridge1088): making interface the new active one
[ 2719.721407][T28632] bridge1088: entered promiscuous mode
[ 2719.740160][T28632] bond1159: (slave bridge1088): Enslaving as an active interface with an up link
[ 2719.837605][T28635] bond1230: (slave bridge1132): making interface the new active one
[ 2719.848666][T28635] bridge1132: entered promiscuous mode
[ 2719.864764][T28635] bond1230: (slave bridge1132): Enslaving as an active interface with an up link
[ 2719.905750][T20905] BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!
[ 2719.911451][T20905] turning off the locking correctness validator.
[ 2719.917807][T20905] CPU: 1 PID: 20905 Comm: kworker/1:2 Not tainted 6.5.0-rc3-syzkaller-00730-g01e6f8ad8d26 #0
[ 2719.927999][T20905] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2023
[ 2719.938098][T20905] Workqueue: events nsim_dev_trap_report_work
[ 2719.944347][T20905] Call Trace:
[ 2719.947657][T20905]
[ 2719.950625][T20905] dump_stack_lvl+0xd9/0x1b0
[ 2719.955284][T20905] __lock_acquire+0x4286/0x5de0
[ 2719.960202][T20905] ? lockdep_hardirqs_on_prepare+0x410/0x410
[ 2719.966250][T20905] lock_acquire+0x1ae/0x510
[ 2719.970799][T20905] ? try_to_wake_up+0xab/0x15a0
[ 2719.972662][T28651] bond1276: entered promiscuous mode
[ 2719.975695][T20905] ? lock_sync+0x190/0x190
[ 2719.975745][T20905] ? lockdep_hardirqs_on_prepare+0x410/0x410
[ 2719.975790][T20905] ? lock_acquire+0x1ae/0x510
[ 2719.975833][T20905] ? find_held_lock+0x2d/0x110
[ 2719.975871][T20905] _raw_spin_lock_irqsave+0x3a/0x50
[ 2719.975906][T20905] ? try_to_wake_up+0xab/0x15a0
[ 2719.975953][T20905] try_to_wake_up+0xab/0x15a0
[ 2719.976004][T20905] ? lock_acquire+0x1ae/0x510
[ 2719.976044][T20905] ? sched_ttwu_pending+0x560/0x560
[ 2719.976097][T20905] ? lock_sync+0x190/0x190
[ 2719.976139][T20905] autoremove_wake_function+0x16/0x150
[ 2719.976183][T20905] __wake_up_common+0x140/0x5a0
[ 2719.976229][T20905] __wake_up_common_lock+0xd6/0x140
[ 2719.976274][T20905] ? __wake_up_common+0x5a0/0x5a0
[ 2719.976315][T20905] ? __zone_watermark_ok+0x24a/0x4c0
[ 2719.976367][T20905] ? pgdat_balanced+0x1ae/0x200
[ 2719.976411][T20905] wakeup_kswapd+0x452/0x5f0
[ 2719.976443][T20905] get_page_from_freelist+0x721/0x31e0
[ 2719.976496][T20905] ? __lock_acquire+0x182f/0x5de0
[ 2719.976543][T20905] ? __zone_watermark_ok+0x4c0/0x4c0
[ 2719.976590][T20905] ? prepare_alloc_pages.constprop.0+0x40c/0x550
[ 2719.976639][T20905] ? lockdep_hardirqs_on_prepare+0x410/0x410
[ 2719.976687][T20905] __alloc_pages+0x1d0/0x4a0
[ 2719.976741][T20905] ? __alloc_pages_slowpath.constprop.0+0x2360/0x2360
[ 2719.976794][T20905] ? lock_acquire+0x1ae/0x510
[ 2719.976839][T20905] ? __slab_alloc.constprop.0+0x56/0xa0
[ 2719.976886][T20905] alloc_pages+0x1a9/0x270
[ 2719.976929][T20905] allocate_slab+0x24e/0x380
[ 2719.976967][T20905] ? cpuset_node_allowed+0x41/0x610
[ 2719.995760][T28651] 8021q: adding VLAN 0 to HW filter on device bond1276
[ 2719.997342][T20905] ___slab_alloc+0x8bc/0x1570
[ 2719.997393][T20905] ? __alloc_skb+0x12b/0x330
[ 2719.997450][T20905] ? __alloc_skb+0x12b/0x330
[ 2720.151226][T20905] ? __slab_alloc.constprop.0+0x56/0xa0
[ 2720.156836][T20905] __slab_alloc.constprop.0+0x56/0xa0
[ 2720.162273][T20905] __kmem_cache_alloc_node+0x137/0x350
[ 2720.167788][T20905] ? __alloc_skb+0x12b/0x330
[ 2720.172442][T20905] ? __alloc_skb+0x12b/0x330
[ 2720.177095][T20905] __kmalloc_node_track_caller+0x4d/0x100
[ 2720.182884][T20905] kmalloc_reserve+0xef/0x270
[ 2720.187622][T20905] __alloc_skb+0x12b/0x330
[ 2720.192108][T20905] ? __napi_build_skb+0x50/0x50
[ 2720.197053][T20905] ? kfree_skbmem+0xef/0x1b0
[ 2720.201718][T20905] nsim_dev_trap_report_work+0x29e/0xc70
19:48:34 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x89150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0)
19:48:34 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x1f00000000000000, 0x101bf)
19:48:34 executing program 3: r0 = socket(0x1c, 0x1, 0x0) connect$inet6(r0, &(0x7f0000000000)={0x1c, 0x1c, 0x1}, 0x1c)
19:48:34 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r2, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r3, @ANYBLOB], 0x1c}}, 0x0) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x1000008, 0x80010, r1, 0xdd5ff000) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) (async) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r2, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r3, @ANYBLOB], 0x1c}}, 0x0) (async) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x1000008, 0x80010, r1, 0xdd5ff000) (async)
19:48:34 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfffffff0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0)
[ 2720.207438][T20905] process_one_work+0xaa2/0x16f0
[ 2720.212446][T20905] ? mld_report_work+0xcb0/0xcb0
[ 2720.215494][T28653] bond1276: (slave bridge1205): making interface the new active one
[ 2720.217490][T20905] ? pwq_dec_nr_in_flight+0x2a0/0x2a0
[ 2720.217541][T20905] ? spin_bug+0x1d0/0x1d0
[ 2720.217591][T20905] worker_thread+0x687/0x1110
[ 2720.240032][T20905] ? __kthread_parkme+0x152/0x220
[ 2720.241490][T28653] bridge1205: entered promiscuous mode
[ 2720.245085][T20905] ? process_one_work+0x16f0/0x16f0
[ 2720.245128][T20905] kthread+0x33a/0x430
[ 2720.245160][T20905] ? kthread_complete_and_exit+0x40/0x40
[ 2720.245200][T20905] ret_from_fork+0x2c/0x70
[ 2720.245239][T20905] ? kthread_complete_and_exit+0x40/0x40
[ 2720.245276][T20905] ret_from_fork_asm+0x11/0x20
[ 2720.245354][T20905] RIP: 0000:0x0
[ 2720.245381][T20905] Code: Unable to access opcode bytes at 0xffffffffffffffd6.
[ 2720.245396][T20905] RSP: 0000:0000000000000000 EFLAGS: 00000000 ORIG_RAX: 0000000000000000
[ 2720.245425][T20905] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
[ 2720.245443][T20905] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
[ 2720.245462][T20905] RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
[ 2720.245480][T20905] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
[ 2720.245499][T20905] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
[ 2720.245528][T20905]
19:48:34 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8ee70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0)
[ 2720.381595][T28653] bond1276: (slave bridge1205): Enslaving as an active interface with an up link
19:48:34 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x3f00000000000000, 0x101bf)
19:48:34 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r2, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r3, @ANYBLOB], 0x1c}}, 0x0) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x1000008, 0x80010, r1,
0xdd5ff000) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) (async) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)=ANY=[@ANYBLOB, @ANYRES16=r2, @ANYBLOB="010000000000000000005a00000008000300", @ANYRES32=r3, @ANYBLOB], 0x1c}}, 0x0) (async) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x1000008, 0x80010, r1, 0xdd5ff000) (async) [ 2720.474476][T28666] validate_nla: 4 callbacks suppressed [ 2720.474518][T28666] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:48:34 executing program 3: r0 = socket$inet_sctp(0x2, 0x5, 0x84) getsockopt$inet_sctp_SCTP_RESET_STREAMS(r0, 0x84, 0x901, &(0x7f0000000180)=ANY=[], &(0x7f00000001c0)=0xc) 19:48:34 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0x9100000000000000, 0x101bf) 19:48:34 executing program 3: r0 = socket$inet6_tcp(0x1c, 0x1, 0x0) sendto$inet6(r0, 0x0, 0x0, 0x0, &(0x7f0000000080)={0x1c, 0x1c}, 0x1c) 19:48:34 executing program 0: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f00000001c0)='blkio.bfq.io_serviced\x00', 0x0, 0x0) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes\x00', 0x0, 0x0) r2 = openat$cgroup(0xffffffffffffffff, &(0x7f00000000c0)='syz1\x00', 0x200002, 0x0) r3 = openat$cgroup_ro(r1, &(0x7f0000000080)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) r4 = openat$cgroup_ro(r0, &(0x7f0000000100)='hugetlb.1GB.rsvd.usage_in_bytes\x00', 0x0, 0x0) openat$cgroup_ro(r3, &(0x7f0000000180)='blkio.bfq.io_service_bytes\x00', 0x0, 0x0) r5 = openat$cgroup_ro(r4, &(0x7f0000000200)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000380), 0x101bf) openat$cgroup_ro(r2, &(0x7f0000000140)='cpu.stat\x00', 0x0, 0x0) [ 2720.651633][T28666] bond1231: entered promiscuous mode [ 2720.657764][T28666] 8021q: adding VLAN 0 to HW filter on device bond1231 [ 2720.672923][T28674] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2720.741186][T28674] bond1160: entered promiscuous mode [ 2720.747382][T28674] 8021q: adding VLAN 0 to HW filter on device bond1160 19:48:34 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8a150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:34 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xbf01000000000000, 0x101bf) 19:48:34 executing program 0: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f00000001c0)='blkio.bfq.io_serviced\x00', 0x0, 0x0) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes\x00', 0x0, 0x0) r2 = openat$cgroup(0xffffffffffffffff, &(0x7f00000000c0)='syz1\x00', 0x200002, 0x0) r3 = openat$cgroup_ro(r1, &(0x7f0000000080)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) r4 = openat$cgroup_ro(r0, &(0x7f0000000100)='hugetlb.1GB.rsvd.usage_in_bytes\x00', 0x0, 0x0) openat$cgroup_ro(r3, &(0x7f0000000180)='blkio.bfq.io_service_bytes\x00', 0x0, 0x0) r5 = openat$cgroup_ro(r4, &(0x7f0000000200)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000380), 0x101bf) openat$cgroup_ro(r2, &(0x7f0000000140)='cpu.stat\x00', 0x0, 0x0) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r0, &(0x7f00000001c0)='blkio.bfq.io_serviced\x00', 0x0, 0x0) (async) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) (async) openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes\x00', 0x0, 0x0) (async) openat$cgroup(0xffffffffffffffff, &(0x7f00000000c0)='syz1\x00', 0x200002, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000080)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) (async) openat$cgroup_ro(r0, &(0x7f0000000100)='hugetlb.1GB.rsvd.usage_in_bytes\x00', 0x0, 0x0) (async) openat$cgroup_ro(r3, &(0x7f0000000180)='blkio.bfq.io_service_bytes\x00', 0x0, 0x0) (async) openat$cgroup_ro(r4, &(0x7f0000000200)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) (async) write$cgroup_int(r5, &(0x7f0000000380), 0x101bf) (async) openat$cgroup_ro(r2, &(0x7f0000000140)='cpu.stat\x00', 0x0, 0x0) (async) 19:48:34 executing program 3: r0 = socket$inet_sctp(0x2, 0x5, 0x84) setsockopt$sock_int(r0, 0xffff, 0x1003, &(0x7f0000000140)=0x10001, 0x4) [ 2720.830220][T28673] bond1231: (slave bridge1133): making interface the new active one [ 2720.838915][T28673] bridge1133: entered promiscuous mode [ 2720.852097][T28673] bond1231: (slave bridge1133): Enslaving as an active interface with an up link [ 2720.870861][T28678] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2720.993129][T28678] bond1277: entered promiscuous mode [ 2721.002092][T28678] 8021q: adding VLAN 0 to HW filter on device bond1277 [ 2721.058652][T28679] bond1160: (slave bridge1089): making interface the new active one [ 2721.067515][T28679] bridge1089: entered promiscuous mode [ 2721.090607][T28679] bond1160: (slave bridge1089): Enslaving as an active interface with an up link 19:48:35 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xfffffffe, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:35 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8f959284, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:35 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xcec5040000000000, 0x101bf) 19:48:35 executing program 0: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f00000001c0)='blkio.bfq.io_serviced\x00', 0x0, 0x0) (async) openat$cgroup_ro(r0, &(0x7f00000001c0)='blkio.bfq.io_serviced\x00', 0x0, 0x0) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) (async) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes\x00', 0x0, 0x0) (async) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes\x00', 0x0, 0x0) r2 = openat$cgroup(0xffffffffffffffff, &(0x7f00000000c0)='syz1\x00', 0x200002, 0x0) r3 = openat$cgroup_ro(r1, &(0x7f0000000080)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) r4 = openat$cgroup_ro(r0, &(0x7f0000000100)='hugetlb.1GB.rsvd.usage_in_bytes\x00', 0x0, 0x0) openat$cgroup_ro(r3, &(0x7f0000000180)='blkio.bfq.io_service_bytes\x00', 0x0, 0x0) r5 = openat$cgroup_ro(r4, &(0x7f0000000200)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000380), 0x101bf) openat$cgroup_ro(r2, &(0x7f0000000140)='cpu.stat\x00', 0x0, 0x0) 19:48:35 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x14, 0x4, 
&(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x7d}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) [ 2721.168034][T28681] bond1277: (slave bridge1206): making interface the new active one [ 2721.176772][T28681] bridge1206: entered promiscuous mode [ 2721.191492][T28681] bond1277: (slave bridge1206): Enslaving as an active interface with an up link 19:48:35 executing program 3: bpf$BPF_BTF_LOAD(0x12, &(0x7f00000002c0)={&(0x7f0000000400)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x18, 0x18, 0x6, [@struct={0x0, 0x1, 0x0, 0x4, 0x0, 0x0, [{0x1}]}]}, {0x0, [0x2e, 0x61, 0x2e, 0x2e]}}, &(0x7f00000004c0)=""/4096, 0x36, 0x1000, 0x1}, 0x20) 19:48:35 executing program 0: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='cgroup.freeze\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) [ 2721.277346][T28717] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:48:35 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xe9ca27d18c7f0000, 0x101bf) [ 2721.399921][T28717] bond1232: entered promiscuous mode [ 2721.433651][T28717] 8021q: adding VLAN 0 to HW filter on device bond1232 [ 2721.489566][T28718] bond1232: (slave bridge1134): making interface the new active one [ 2721.497905][T28718] bridge1134: entered promiscuous mode [ 2721.517092][T28718] bond1232: (slave bridge1134): Enslaving as an active interface with an up link [ 2721.534564][T28720] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
19:48:35 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8a600000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:35 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x14, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x6}]}, &(0x7f0000000140)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x8, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:35 executing program 0: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) (async) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='cgroup.freeze\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) 19:48:35 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xf4ffffff00000000, 0x101bf) [ 2721.621234][T28720] bond1161: entered promiscuous mode [ 2721.628263][T28720] 8021q: adding VLAN 0 to HW filter on device bond1161 [ 2721.721248][T28721] bond1161: (slave bridge1090): making interface the new active one [ 2721.730533][T28721] bridge1090: entered promiscuous mode [ 2721.750474][T28721] bond1161: (slave bridge1090): Enslaving as an active interface with an up link [ 2721.760233][T28729] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
19:48:35 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xffffffff, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2721.809260][T28729] bond1278: entered promiscuous mode [ 2721.814961][T28729] 8021q: adding VLAN 0 to HW filter on device bond1278 [ 2721.866585][T28735] bond1278: (slave bridge1207): making interface the new active one [ 2721.874915][T28735] bridge1207: entered promiscuous mode [ 2721.886010][T28735] bond1278: (slave bridge1207): Enslaving as an active interface with an up link 19:48:35 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8fe70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:35 executing program 0: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) mkdirat$cgroup(r0, &(0x7f0000000040)='syz0\x00', 0x1ff) (async) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='cgroup.freeze\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000380), 0x101bf) 19:48:35 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x6, 0x0, 0x2}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:35 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfbffffff00000000, 0x101bf) [ 2721.924802][T28751] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:48:36 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x7, 0x6, 0xba0b}) write$cgroup_int(r1, &(0x7f0000000040)=0x80000000000ea3f, 0x5d9e) 19:48:36 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x78}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:36 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xfeffffff00000000, 0x101bf) [ 2722.112468][T28751] bond1233: entered promiscuous mode [ 2722.118404][T28751] 8021q: adding VLAN 0 to HW filter on device bond1233 [ 2722.240389][T28752] bond1233: (slave bridge1135): making interface the new active one [ 2722.249200][T28752] bridge1135: entered promiscuous mode [ 2722.262782][T28752] bond1233: (slave bridge1135): Enslaving as an active interface with an up link 19:48:36 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8b150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:36 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x7, 0x6, 0xba0b}) write$cgroup_int(r1, &(0x7f0000000040)=0x80000000000ea3f, 0x5d9e) 19:48:36 executing program 3: bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000100)={0x8, 0xa, &(0x7f0000000680)=@framed={{0x18, 0x8}, [@func={0x85, 0x0, 0x1, 0x0, 0x6}, @initr0, @generic={0x2c, 0x8}, @initr0, @exit]}, &(0x7f00000000c0)='syzkaller\x00', 0x4, 0xdb, &(0x7f0000000340)=""/219, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:36 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffff7f00000000, 0x101bf) [ 2722.285426][T28757] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2722.387358][T28757] bond1162: entered promiscuous mode [ 2722.397940][T28757] 8021q: adding VLAN 0 to HW filter on device bond1162 19:48:36 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x6, 0x0, 0xb}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) [ 2722.513052][T28762] bond1162: (slave bridge1091): making interface the new active one [ 2722.521735][T28762] bridge1091: entered promiscuous mode [ 2722.536114][T28762] bond1162: (slave bridge1091): Enslaving as an active interface with an up link [ 2722.546897][T28764] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2722.655977][T28764] bond1279: entered promiscuous mode [ 2722.661793][T28764] 8021q: adding VLAN 0 to HW filter on device bond1279 [ 2722.754856][T28770] bond1279: (slave bridge1208): making interface the new active one [ 2722.763441][T28770] bridge1208: entered promiscuous mode [ 2722.778190][T28770] bond1279: (slave bridge1208): Enslaving as an active interface with an up link [ 2722.793708][T28784] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:48:36 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x90e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:36 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x7, 0x6, 0xba0b}) write$cgroup_int(r1, &(0x7f0000000040)=0x80000000000ea3f, 0x5d9e) 19:48:36 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x4, 0x0, 0x7}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:36 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380)=0xffffffff00000000, 0x101bf) 19:48:36 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, 
{}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:36 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x10c80) [ 2722.896477][T28784] bond1234: entered promiscuous mode [ 2722.913753][T28784] 8021q: adding VLAN 0 to HW filter on device bond1234 19:48:36 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x7fffffff, 0x7fad, 0xfffffffffffffbff}) sendmsg$NL80211_CMD_DEL_KEY(r1, &(0x7f0000000100)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x38, 0x0, 0x200, 0x70bd29, 0x25dfdbfd, {{}, {@val={0x8}, @val={0xc, 0x99, {0x8000, 0x6b}}}}, [@NL80211_ATTR_KEY_IDX={0x5, 0x8, 0x3}, @NL80211_ATTR_KEY_TYPE={0x8, 0x37, 0x2}]}, 0x38}, 0x1, 0x0, 0x0, 0x4008000}, 0x4040000) [ 2722.978328][T28790] bond1234: (slave bridge1136): making interface the new active one [ 2722.987406][T28790] bridge1136: entered promiscuous mode [ 2722.998947][T28790] bond1234: (slave bridge1136): Enslaving as an active interface with an up link 19:48:37 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8c150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:37 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x15, 0x4, &(0x7f0000000080)=@framed={{}, [@ldst={0x3, 0x0, 0x6, 0x0, 0x0, 0x0, 0x1}]}, &(0x7f0000000140)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:37 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async, rerun: 32) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x7fffffff, 0x7fad, 0xfffffffffffffbff}) (rerun: 32) sendmsg$NL80211_CMD_DEL_KEY(r1, &(0x7f0000000100)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x38, 0x0, 0x200, 0x70bd29, 0x25dfdbfd, {{}, {@val={0x8}, @val={0xc, 0x99, {0x8000, 0x6b}}}}, [@NL80211_ATTR_KEY_IDX={0x5, 0x8, 0x3}, @NL80211_ATTR_KEY_TYPE={0x8, 0x37, 0x2}]}, 0x38}, 0x1, 0x0, 0x0, 0x4008000}, 0x4040000) 19:48:37 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x14, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x4d}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:37 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0xfffffdef) [ 2723.240482][T28803] bond1163: entered promiscuous mode [ 2723.284919][T28803] 8021q: adding VLAN 0 to HW filter on device bond1163 [ 2723.494894][T28805] bond1280: entered promiscuous mode [ 2723.526774][T28805] 8021q: adding VLAN 0 to HW filter on device bond1280 [ 2723.707485][T28807] bond1163: (slave bridge1092): making interface the new active one [ 2723.757512][T28807] bridge1092: entered promiscuous mode [ 2723.801688][T28807] bond1163: (slave bridge1092): Enslaving as an active interface with an up link [ 2723.959018][T28808] bond1280: (slave bridge1209): making interface the new active one [ 2724.008007][T28808] bridge1209: entered promiscuous mode [ 2724.040441][T28808] bond1280: (slave bridge1209): Enslaving as an active interface with an up link 19:48:38 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x91e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:38 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x7d}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:38 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x7fffffff, 0x7fad, 0xfffffffffffffbff}) sendmsg$NL80211_CMD_DEL_KEY(r1, &(0x7f0000000100)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x38, 0x0, 0x200, 0x70bd29, 0x25dfdbfd, {{}, {@val={0x8}, @val={0xc, 0x99, {0x8000, 0x6b}}}}, [@NL80211_ATTR_KEY_IDX={0x5, 0x8, 0x3}, @NL80211_ATTR_KEY_TYPE={0x8, 0x37, 0x2}]}, 0x38}, 0x1, 0x0, 0x0, 0x4008000}, 0x4040000) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) (async) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x7fffffff, 0x7fad, 0xfffffffffffffbff}) (async) sendmsg$NL80211_CMD_DEL_KEY(r1, &(0x7f0000000100)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x38, 0x0, 0x200, 0x70bd29, 0x25dfdbfd, {{}, {@val={0x8}, @val={0xc, 0x99, {0x8000, 0x6b}}}}, [@NL80211_ATTR_KEY_IDX={0x5, 0x8, 0x3}, @NL80211_ATTR_KEY_TYPE={0x8, 0x37, 0x2}]}, 0x38}, 0x1, 0x0, 0x0, 0x4008000}, 0x4040000) (async) 19:48:38 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, 
&(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x2}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:38 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x5, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x1}]}, &(0x7f0000000140)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:38 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) getpeername$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @random}, &(0x7f00000000c0)=0x14) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(0xffffffffffffffff, 0x89f0, &(0x7f00000001c0)={'erspan0\x00', &(0x7f0000000100)={'syztnl1\x00', 0x0, 0x3c08, 0x10, 0x1, 0x20, {{0x1c, 0x4, 0x0, 0x3e, 0x70, 0x66, 0x0, 0x7f, 0x29, 0x0, @multicast2, @broadcast, {[@timestamp_addr={0x44, 0x3c, 0x8c, 0x1, 0xe, [{@multicast1}, {@private=0xa010101, 0x101}, {@empty, 0xfff}, {@multicast1, 0x4}, {@empty, 0x2}, {@empty, 0x12a4}, {@dev={0xac, 0x14, 0x14, 0x16}, 0xf323}]}, @timestamp_addr={0x44, 0xc, 0x7a, 0x1, 0x8, [{@dev={0xac, 0x14, 0x14, 0x2d}, 0x2d}]}, @lsrr={0x83, 0x13, 0x5d, [@dev={0xac, 0x14, 0x14, 0x1f}, @initdev={0xac, 0x1e, 0x0, 0x0}, @multicast1, @remote]}, @end]}}}}}) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(r0, 0x89f0, &(0x7f0000000280)={'sit0\x00', &(0x7f0000000200)={'gretap0\x00', 0x0, 0x0, 0xd2ed9bd97520c8ea, 0x1, 0x9, {{0xa, 0x4, 0x1, 0x2, 0x28, 0x64, 0x0, 0x7, 0x29, 0x0, @multicast2, @loopback, {[@lsrr={0x83, 0x13, 0x72, [@multicast2, @broadcast, @empty, @private=0xa010101]}]}}}}}) ioctl$sock_ipv6_tunnel_SIOCCHGTUNNEL(r0, 0x89f3, &(0x7f00000003c0)={'syztnl1\x00', &(0x7f0000000300)={'syztnl2\x00', 0x0, 0x29, 0x7f, 0x1, 0x0, 0x6, @private1={0xfc, 0x1, '\x00', 0x1}, @remote, 0x7800, 0x40, 0x3, 0x6}}) r10 = socket$nl_route(0x10, 0x3, 0x0) r11 = socket(0x1, 0x803, 0x0) getsockname$packet(r11, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) sendmsg$nl_route(r10, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r12], 0x3c}}, 0x0) r13 = socket$nl_route(0x10, 0x3, 0x0) r14 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r14, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) 
sendmsg$nl_route(r13, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="2000000011000d04000000000000000010000000", @ANYRES32=r15, @ANYBLOB="fbffffffffffffff"], 0x20}}, 0x0) sendmsg$TEAM_CMD_PORT_LIST_GET(r3, &(0x7f0000000440)={&(0x7f0000000040), 0xc, &(0x7f0000000400)={&(0x7f0000000980)={0x254, 0x0, 0x800, 0x70bd28, 0x25dfdbfd, {}, [{{0x8}, {0x178, 0x2, 0x0, 0x1, [{0x3c, 0x1, @user_linkup_enabled={{{0x24}, {0x5}, {0x4}}, {0x8}}}, {0x3c, 0x1, @enabled={{{0x24}, {0x5}, {0x4}}, {0x8}}}, {0x40, 0x1, @lb_hash_stats={{{0x24}, {0x5}, {0x8, 0x4, 0x5}}, {0x8}}}, {0x40, 0x1, @queue_id={{{0x24}, {0x5}, {0x8, 0x4, 0x6}}, {0x8, 0x6, r6}}}, {0x3c, 0x1, @user_linkup={{{0x24}, {0x5}, {0x4}}, {0x8, 0x6, r7}}}, {0x40, 0x1, @lb_port_stats={{{0x24}, {0x5}, {0x8, 0x4, 0x2}}, {0x8, 0x6, r8}}}]}}, {{0x8, 0x1, r9}, {0x74, 0x2, 0x0, 0x1, [{0x38, 0x1, @activeport={{0x24}, {0x5}, {0x8, 0x4, r12}}}, {0x38, 0x1, @notify_peers_count={{0x24}, {0x5}, {0x8, 0x4, 0xfffffffc}}}]}}, {{0x8, 0x1, r15}, {0x3c, 0x2, 0x0, 0x1, [{0x38, 0x1, @mcast_rejoin_count={{0x24}, {0x5}, {0x8, 0x4, 0x4}}}]}}]}, 0x254}, 0x1, 0x0, 0x0, 0x20000000}, 0x4004001) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$ETHTOOL_MSG_LINKMODES_SET(r11, &(0x7f0000000540)={&(0x7f0000000480)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000500)={&(0x7f00000004c0)={0x34, 0x0, 0x0, 0x70bd2d, 0x25dfdbff, {}, [@ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0xfffffff8}, @ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0xe8b}, @ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0x81}, @ETHTOOL_A_LINKMODES_LANES={0x8, 0x9, 0x3}]}, 0x34}, 0x1, 0x0, 0x0, 0x4044805}, 0x40) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r2, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r16}, @void}}}, 0x1c}}, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000000000)={0x0, 0x3, 0x10000, 0x1000}) 19:48:38 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0xfffffffffffffdef) 19:48:38 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8d150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:38 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x3, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x21}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:38 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x14, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x2}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:38 executing 
program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x6, 0x0, 0xd}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) [ 2724.173781][T28820] workqueue: Failed to create a rescuer kthread for wq "bond1235": -EINTR [ 2724.456832][T28833] bond1164: entered promiscuous mode [ 2724.547737][T28833] 8021q: adding VLAN 0 to HW filter on device bond1164 19:48:38 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2724.773879][T28839] bond1164: (slave bridge1093): making interface the new active one [ 2724.803741][T28839] bridge1093: entered promiscuous mode [ 2724.828102][T28839] bond1164: (slave bridge1093): Enslaving as an active interface with an up link [ 2724.920987][T28841] bond1281: entered promiscuous mode [ 2724.947691][T28841] 8021q: adding VLAN 0 to HW filter on device bond1281 [ 2725.257781][T28845] bond1281: (slave bridge1210): making interface the new active one [ 2725.283557][T28845] bridge1210: entered promiscuous mode [ 2725.307390][T28845] bond1281: (slave bridge1210): Enslaving as an active interface with an up link 19:48:39 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x92e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:39 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x6, 0x0, 0xd}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:39 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:48:39 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x3}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:39 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f0000000040)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2725.528590][T28858] bond1235: entered promiscuous mode [ 2725.569790][T28858] 8021q: adding VLAN 0 to HW filter on device bond1235 19:48:39 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) (async) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) getpeername$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @random}, &(0x7f00000000c0)=0x14) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(0xffffffffffffffff, 0x89f0, &(0x7f00000001c0)={'erspan0\x00', &(0x7f0000000100)={'syztnl1\x00', 0x0, 0x3c08, 0x10, 0x1, 0x20, {{0x1c, 0x4, 0x0, 0x3e, 0x70, 0x66, 0x0, 0x7f, 0x29, 0x0, @multicast2, @broadcast, {[@timestamp_addr={0x44, 0x3c, 0x8c, 0x1, 0xe, [{@multicast1}, {@private=0xa010101, 0x101}, {@empty, 0xfff}, {@multicast1, 0x4}, {@empty, 0x2}, {@empty, 0x12a4}, {@dev={0xac, 0x14, 0x14, 0x16}, 0xf323}]}, @timestamp_addr={0x44, 0xc, 0x7a, 0x1, 0x8, [{@dev={0xac, 0x14, 0x14, 0x2d}, 0x2d}]}, @lsrr={0x83, 0x13, 0x5d, [@dev={0xac, 0x14, 0x14, 0x1f}, @initdev={0xac, 0x1e, 0x0, 0x0}, @multicast1, @remote]}, @end]}}}}}) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(r0, 0x89f0, &(0x7f0000000280)={'sit0\x00', &(0x7f0000000200)={'gretap0\x00', 0x0, 0x0, 0xd2ed9bd97520c8ea, 0x1, 0x9, {{0xa, 0x4, 0x1, 0x2, 0x28, 0x64, 0x0, 0x7, 0x29, 0x0, @multicast2, @loopback, {[@lsrr={0x83, 0x13, 0x72, [@multicast2, @broadcast, @empty, @private=0xa010101]}]}}}}}) ioctl$sock_ipv6_tunnel_SIOCCHGTUNNEL(r0, 0x89f3, &(0x7f00000003c0)={'syztnl1\x00', &(0x7f0000000300)={'syztnl2\x00', 0x0, 0x29, 0x7f, 0x1, 0x0, 0x6, @private1={0xfc, 0x1, '\x00', 0x1}, @remote, 0x7800, 0x40, 0x3, 0x6}}) r10 = socket$nl_route(0x10, 0x3, 0x0) r11 = socket(0x1, 0x803, 0x0) getsockname$packet(r11, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) (async) getsockname$packet(r11, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) sendmsg$nl_route(r10, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r12], 0x3c}}, 0x0) r13 = socket$nl_route(0x10, 0x3, 0x0) socket$packet(0x11, 0x3, 0x300) (async) r14 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r14, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) 
sendmsg$nl_route(r13, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="2000000011000d04000000000000000010000000", @ANYRES32=r15, @ANYBLOB="fbffffffffffffff"], 0x20}}, 0x0) sendmsg$TEAM_CMD_PORT_LIST_GET(r3, &(0x7f0000000440)={&(0x7f0000000040), 0xc, &(0x7f0000000400)={&(0x7f0000000980)={0x254, 0x0, 0x800, 0x70bd28, 0x25dfdbfd, {}, [{{0x8}, {0x178, 0x2, 0x0, 0x1, [{0x3c, 0x1, @user_linkup_enabled={{{0x24}, {0x5}, {0x4}}, {0x8}}}, {0x3c, 0x1, @enabled={{{0x24}, {0x5}, {0x4}}, {0x8}}}, {0x40, 0x1, @lb_hash_stats={{{0x24}, {0x5}, {0x8, 0x4, 0x5}}, {0x8}}}, {0x40, 0x1, @queue_id={{{0x24}, {0x5}, {0x8, 0x4, 0x6}}, {0x8, 0x6, r6}}}, {0x3c, 0x1, @user_linkup={{{0x24}, {0x5}, {0x4}}, {0x8, 0x6, r7}}}, {0x40, 0x1, @lb_port_stats={{{0x24}, {0x5}, {0x8, 0x4, 0x2}}, {0x8, 0x6, r8}}}]}}, {{0x8, 0x1, r9}, {0x74, 0x2, 0x0, 0x1, [{0x38, 0x1, @activeport={{0x24}, {0x5}, {0x8, 0x4, r12}}}, {0x38, 0x1, @notify_peers_count={{0x24}, {0x5}, {0x8, 0x4, 0xfffffffc}}}]}}, {{0x8, 0x1, r15}, {0x3c, 0x2, 0x0, 0x1, [{0x38, 0x1, @mcast_rejoin_count={{0x24}, {0x5}, {0x8, 0x4, 0x4}}}]}}]}, 0x254}, 0x1, 0x0, 0x0, 0x20000000}, 0x4004001) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$ETHTOOL_MSG_LINKMODES_SET(r11, &(0x7f0000000540)={&(0x7f0000000480)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000500)={&(0x7f00000004c0)={0x34, 0x0, 0x0, 0x70bd2d, 0x25dfdbff, {}, [@ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0xfffffff8}, @ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0xe8b}, @ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0x81}, @ETHTOOL_A_LINKMODES_LANES={0x8, 0x9, 0x3}]}, 0x34}, 0x1, 0x0, 0x0, 0x4044805}, 0x40) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r2, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r16}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r2, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r16}, @void}}}, 0x1c}}, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000000000)={0x0, 0x3, 0x10000, 0x1000}) 19:48:39 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x6, 0x0, 0xd}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) [ 2725.859713][T28861] bond1235: (slave bridge1137): making interface the new active one [ 2725.885339][T28861] bridge1137: entered promiscuous mode [ 2725.922321][T28861] bond1235: (slave bridge1137): Enslaving as an active interface with an up link 19:48:40 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8e000000, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:40 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, 
&(0x7f0000000000)='blkio.bfq.io_merged\x00', 0x275a, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) connect$pppl2tp(r1, &(0x7f0000000040)=@pppol2tpv3in6={0x18, 0x1, {0x0, r0, 0x4, 0x3, 0x3, 0x0, {0xa, 0x4e22, 0x95b9, @local, 0x7}}}, 0x3a) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) 19:48:40 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x2, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x6, 0x0, 0xd}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) [ 2725.998670][T28874] validate_nla: 6 callbacks suppressed [ 2725.998696][T28874] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:40 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x14, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0xd}]}, &(0x7f0000000140)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:40 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='blkio.bfq.io_merged\x00', 0x275a, 0x0) (async) r1 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) (async) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) connect$pppl2tp(r1, &(0x7f0000000040)=@pppol2tpv3in6={0x18, 0x1, {0x0, r0, 0x4, 0x3, 0x3, 0x0, {0xa, 0x4e22, 0x95b9, @local, 0x7}}}, 0x3a) (async) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) [ 2726.156785][T28874] bond1282: entered promiscuous mode [ 2726.173007][T28874] 8021q: adding VLAN 0 to HW filter on device bond1282 [ 2726.227672][T28876] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2726.302811][T28876] workqueue: Failed to create a rescuer kthread for wq "bond1165": -EINTR [ 2726.447219][T28878] bond1282: (slave bridge1211): making interface the new active one [ 2726.492254][T28878] bridge1211: entered promiscuous mode [ 2726.537029][T28878] bond1282: (slave bridge1211): Enslaving as an active interface with an up link 19:48:40 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x93e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:40 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x3, 0x4, &(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x15}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:40 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='blkio.bfq.io_merged\x00', 0x275a, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) connect$pppl2tp(r1, &(0x7f0000000040)=@pppol2tpv3in6={0x18, 0x1, {0x0, r0, 0x4, 0x3, 0x3, 0x0, {0xa, 0x4e22, 0x95b9, @local, 0x7}}}, 0x3a) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='blkio.bfq.io_merged\x00', 0x275a, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$nl_route_sched(r1, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) (async) getsockname$packet(r0, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) connect$pppl2tp(r1, &(0x7f0000000040)=@pppol2tpv3in6={0x18, 0x1, {0x0, r0, 0x4, 0x3, 0x3, 0x0, {0xa, 0x4e22, 0x95b9, @local, 0x7}}}, 0x3a) (async) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) 19:48:40 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x4}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:40 executing program 3: bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000000)={0x3, 0x4, 
&(0x7f0000000180)=@framed={{}, [@jmp={0x5, 0x0, 0x8, 0x0, 0x0, 0x0, 0x57}]}, &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) 19:48:40 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r2, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000500)=ANY=[@ANYBLOB="3c00000010008506000000ff0100000000000000", @ANYRES32=r3, @ANYBLOB="00010000000000001c0012000c000100626f6e64"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000040)=@newqdisc={0x17c, 0x24, 0x400, 0x70bd2c, 0x25dfdbff, {0x0, 0x0, 0x0, 0x0, {0x9, 0xfff3}, {0xa, 0xa}, {0x1, 0xb}}, [@TCA_STAB={0xac, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x80, 0x1, 0x1, 0x200, 0x0, 0x6, 0x7, 0x9}}, {0x16, 0x2, [0xfc00, 0x0, 0xffff, 0x3, 0x0, 0xfff8, 0x9, 0x1, 0x2]}}, {{0x1c, 0x1, {0x0, 0x7, 0x1, 0x0, 0x0, 0x0, 0x2, 0x2}}, {0x8, 0x2, [0x6, 0x8]}}, {{0x1c, 0x1, {0x40, 0x92, 0x100, 0x7, 0x1, 0xfffffffe, 0x6, 0x2}}, {0x8, 0x2, [0x3, 0x5]}}, {{0x1c, 0x1, {0xea, 0x80, 0xffff, 0xe1e, 0x0, 0xd084, 0xfff, 0x5}}, {0xe, 0x2, [0x5b7, 0x7, 0x0, 0x0, 0x2]}}]}, @TCA_RATE={0x6, 0x5, {0x0, 0xd9}}, @TCA_RATE={0x6, 0x5, {0x3, 0x7f}}, @qdisc_kind_options=@q_pfifo_head_drop={{0x14}, {0x8, 0x2, 0x4}}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x9000}, @qdisc_kind_options=@q_plug={{0x9}, {0xc, 0x2, {0x0, 0x9}}}, @TCA_STAB={0x50, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x2, 0xa2, 0x14db, 0x4, 0x2, 0x0, 0xfff, 0x2}}, {0x8, 0x2, [0xffe0, 0xa2eb]}}, {{0x1c, 0x1, {0x4, 0x6, 0x95, 0x1f, 0x0, 0xfa, 0x9, 0x3}}, {0xa, 0x2, [0xffff, 0x0, 0x4]}}]}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x20}, @TCA_EGRESS_BLOCK={0x8}]}, 0x17c}, 0x1, 0x0, 0x0, 0x4008000}, 0x80) 19:48:40 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) (async) getpeername$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @random}, &(0x7f00000000c0)=0x14) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(0xffffffffffffffff, 0x89f0, &(0x7f00000001c0)={'erspan0\x00', &(0x7f0000000100)={'syztnl1\x00', 0x0, 0x3c08, 0x10, 0x1, 0x20, {{0x1c, 0x4, 0x0, 0x3e, 0x70, 0x66, 0x0, 0x7f, 0x29, 0x0, @multicast2, @broadcast, {[@timestamp_addr={0x44, 0x3c, 0x8c, 0x1, 0xe, [{@multicast1}, {@private=0xa010101, 0x101}, {@empty, 0xfff}, {@multicast1, 0x4}, {@empty, 0x2}, {@empty, 0x12a4}, {@dev={0xac, 0x14, 0x14, 0x16}, 0xf323}]}, @timestamp_addr={0x44, 0xc, 0x7a, 0x1, 0x8, [{@dev={0xac, 0x14, 0x14, 0x2d}, 0x2d}]}, @lsrr={0x83, 0x13, 0x5d, [@dev={0xac, 0x14, 0x14, 0x1f}, 
@initdev={0xac, 0x1e, 0x0, 0x0}, @multicast1, @remote]}, @end]}}}}}) (async) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(r0, 0x89f0, &(0x7f0000000280)={'sit0\x00', &(0x7f0000000200)={'gretap0\x00', 0x0, 0x0, 0xd2ed9bd97520c8ea, 0x1, 0x9, {{0xa, 0x4, 0x1, 0x2, 0x28, 0x64, 0x0, 0x7, 0x29, 0x0, @multicast2, @loopback, {[@lsrr={0x83, 0x13, 0x72, [@multicast2, @broadcast, @empty, @private=0xa010101]}]}}}}}) ioctl$sock_ipv6_tunnel_SIOCCHGTUNNEL(r0, 0x89f3, &(0x7f00000003c0)={'syztnl1\x00', &(0x7f0000000300)={'syztnl2\x00', 0x0, 0x29, 0x7f, 0x1, 0x0, 0x6, @private1={0xfc, 0x1, '\x00', 0x1}, @remote, 0x7800, 0x40, 0x3, 0x6}}) r10 = socket$nl_route(0x10, 0x3, 0x0) (async) r11 = socket(0x1, 0x803, 0x0) getsockname$packet(r11, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000002c0)=0x14) sendmsg$nl_route(r10, &(0x7f0000000300)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000600)=ANY=[@ANYBLOB="3c0000001000010400"/20, @ANYRES32=0x0, @ANYBLOB="04a0040000000000140003006272696467655f736c6176655f31000008000a00", @ANYRES32=r12], 0x3c}}, 0x0) (async) r13 = socket$nl_route(0x10, 0x3, 0x0) (async) r14 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r14, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r13, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000340)=ANY=[@ANYBLOB="2000000011000d04000000000000000010000000", @ANYRES32=r15, @ANYBLOB="fbffffffffffffff"], 0x20}}, 0x0) (async) sendmsg$TEAM_CMD_PORT_LIST_GET(r3, &(0x7f0000000440)={&(0x7f0000000040), 0xc, &(0x7f0000000400)={&(0x7f0000000980)={0x254, 0x0, 0x800, 0x70bd28, 0x25dfdbfd, {}, [{{0x8}, {0x178, 0x2, 0x0, 0x1, [{0x3c, 0x1, @user_linkup_enabled={{{0x24}, {0x5}, {0x4}}, {0x8}}}, {0x3c, 0x1, @enabled={{{0x24}, {0x5}, {0x4}}, {0x8}}}, {0x40, 0x1, @lb_hash_stats={{{0x24}, {0x5}, {0x8, 0x4, 0x5}}, {0x8}}}, {0x40, 0x1, @queue_id={{{0x24}, {0x5}, {0x8, 0x4, 0x6}}, {0x8, 0x6, r6}}}, {0x3c, 0x1, @user_linkup={{{0x24}, {0x5}, {0x4}}, {0x8, 0x6, r7}}}, {0x40, 0x1, @lb_port_stats={{{0x24}, {0x5}, {0x8, 0x4, 0x2}}, {0x8, 0x6, r8}}}]}}, {{0x8, 0x1, r9}, {0x74, 0x2, 0x0, 0x1, [{0x38, 0x1, @activeport={{0x24}, {0x5}, {0x8, 0x4, r12}}}, {0x38, 0x1, @notify_peers_count={{0x24}, {0x5}, {0x8, 0x4, 0xfffffffc}}}]}}, {{0x8, 0x1, r15}, {0x3c, 0x2, 0x0, 0x1, [{0x38, 0x1, @mcast_rejoin_count={{0x24}, {0x5}, {0x8, 0x4, 0x4}}}]}}]}, 0x254}, 0x1, 0x0, 0x0, 0x20000000}, 0x4004001) (async) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) (async) sendmsg$ETHTOOL_MSG_LINKMODES_SET(r11, &(0x7f0000000540)={&(0x7f0000000480)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000500)={&(0x7f00000004c0)={0x34, 0x0, 0x0, 0x70bd2d, 0x25dfdbff, {}, [@ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0xfffffff8}, @ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0xe8b}, @ETHTOOL_A_LINKMODES_SPEED={0x8, 0x5, 0x81}, @ETHTOOL_A_LINKMODES_LANES={0x8, 0x9, 0x3}]}, 0x34}, 0x1, 0x0, 0x0, 0x4044805}, 0x40) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r2, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r16}, @void}}}, 0x1c}}, 0x0) (async) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000000000)={0x0, 0x3, 0x10000, 0x1000}) [ 2726.716309][T28896] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2726.923102][T28896] bond1236: entered promiscuous mode [ 2726.962801][T28896] 8021q: adding VLAN 0 to HW filter on device bond1236 [ 2727.137084][T28899] bond1236: (slave bridge1138): making interface the new active one [ 2727.157755][T28899] bridge1138: entered promiscuous mode 19:48:41 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8e150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:41 executing program 3: r0 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) setsockopt$inet6_group_source_req(r0, 0x29, 0x2e, &(0x7f0000000140)={0x0, {{0xa, 0x0, 0x0, @private2}}, {{0xa, 0x0, 0x0, @mcast2}}}, 0x108) [ 2727.186885][T28899] bond1236: (slave bridge1138): Enslaving as an active interface with an up link [ 2727.223052][T28916] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2727.362916][T28916] bond1283: entered promiscuous mode [ 2727.400612][T28916] 8021q: adding VLAN 0 to HW filter on device bond1283 [ 2727.665430][T28917] bond1283: (slave bridge1212): making interface the new active one [ 2727.684867][T28917] bridge1212: entered promiscuous mode 19:48:41 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x94e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2727.715305][T28917] bond1283: (slave bridge1212): Enslaving as an active interface with an up link [ 2727.742095][T28922] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2727.801536][T28922] workqueue: Failed to create a rescuer kthread for wq "bond1165": -EINTR [ 2727.979392][T28924] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.2'. 
19:48:42 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x5}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:42 executing program 3: r0 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) sendmsg$inet6(r0, &(0x7f0000000080)={0x0, 0x0, 0x0}, 0x0) 19:48:42 executing program 0: r0 = syz_init_net_socket$802154_dgram(0x24, 0x2, 0x0) writev(r0, &(0x7f0000000140)=[{&(0x7f0000000980)="28421cd4a4a21fac3d10c0a4200b622f6b19b22dbd8f8ddf28226f71861812feb045f7b9136580d9b28baebf9da57697e575f1036b2d903e02a6fe986bf663617377a3762d615399d8a9337cc7ba8ad5100de04a7d5192cf2ab62824c1663d84ae2d13d53d99f15085c3355a0b63c0f40afa7c1d82536183c6c57ee095a47c41fd439a232f21ad1359f060f0aeb10fa084891e3d0adf0a259af9873f1353f9dcb854ad0b25509961d7ea68740302bf35be61b3201ff730bc1b5ca7ae5e2399d0e61fa5242c3b3580aafb7470d6d3fa2ee4ce651c223ef57ed1246527e29d1dbe707124af812930dcc4d034e67e9b67cc7ed5f6793b3cf251b357f6d629fde119d90dc6444729a179a59f6b2c7784454496e368e7ff874bf8843a879a21662ada589610d9b2bb3777c01a3e789f2b62feec567d0987bdb74ac1dd2d7cef3aa8afb6b90b3107aec38c06abc4fd7c5a7fb4aba3f8a9689f4e68eebe839c77763bd3be7b11f2492ce6eb5b8359d4a725e6a4792315a435641e19fd4905a2469e6d43f54f32abf8cc3b9c887496af24e2e6279ede5f7ccd2eef4685c8a6fe56be2268a40e66888c369c90b28907777e2f8252139d5b6e7387144cd9a5e14e3c1b6f8cd277537cdfd562e119f4feddbf7bc71980055648c39b2087212dea844be93b0b372d926ae4971df6c01886963226337f41bb8f6615b6a2ceb08d57256cc340d838630f5fa3a984374893aa2b6f58a7ecb0e091af7cbf9146c697c0611b5a86e5cdbebdd2bef5dd34a96376986df7d625d6ea162a3d407dcc4ed454759f3ceebb4485027a28d7571870f06c3daa7733132b5b8a6e7910707e734b62d16cfcb8a7f7c8779d4c884c8b78af62a3a8d9a5a6ed71a9ce81d60c0f0dcaf4e0ecb92396e971dc2809b53dd0a5ab08a248bc3552b8e6539dd9880a3e42870c02a587445a9e41548ddd21c8c8f0b9456db8e19432768559e3d91e05c747dbaec9debcb4307f2773980865d79a100e00edb2fd1079864e94d5d0415929a797d594813cbdc45c1b28856970ae7a78821e99355b8ac094566b9c49efbcb7f048579aea2b03ebd3efc6b4b6ae1779b918015b282bf8fe39eb832f56d3659f93f041094f09288d0c5505beeab13f3c423a08973515896ab432dbe011b474af580e8b59358ab54697fa9997546d1d419d82bad31007096d903afe3db61a280b7067c6963a8c900c21f1a0d1971b8a161dde78d955b2404f8a60af77259880d1c60049840ad3b3fe3cfe4a5c8ab94ecd294c42940c4fb80fed677b64e6c1fffeaed228f8ddf16cec0cc3175a4b9c1511e90684895c253ceffd97ebbd2d2c8ab6867355bce4f3b552ae8777a210bf0a604aa0af55eec4602d5395cba8fc267ed5e315b1468b1f4005c9b8fa8ca20cdde19d3d47f97768635ba2bb98f6b458b733421e80755858c42ecffadef8d2e67f6f41f7bfd6c96c7d17892f5d3a3ccbfbf11e9a663e7797b8ca758d686305b8b47e9392cd01841622a992639dc8590b39616a2dbf53817120c198469cbf772eaa34c7dfa5bd806909f6022661ae4df63ccdcff4a8279650df42d1a09aa2e63a52709f1b9236eee27941ecb2a11c90296251aede409334dc970d0e48336e6c03cc2a343d98b67be0f889e85329889d08869da17ec2e2d0fa0bfd94efe1a43b3ed970ee1a10d36f100597479731916bbc1b885c4bd8f08515cc34b8753437
4579b70365e816fe95899aa3c35756f9791dcf182b3e809b0f115b4a9f30bdd55d3af72481ecb8f903c072f8eb1c55d315ad4a1f69aa40ec81bbc2019cf74876187c21a7ed878c3957ec8a3ab328deb716d8aec74aefc09e927d9dfd3efcc7669f13660b001f87193b87113e190a6338a45fe25b1669264e3d42db7ec4bbe906390785b1e489c7f5ae0b31b51704e3e6ae34e69a7c1f7990e98355dde5c8da185d656ff5e6d150d0528f4f192e54d0f0754bdae561e124e13f166f8696f2708cced066a48cb67575a8be464b3fe9284523e0b22538c096ce4030278f8382ea7df5bb4923205c51d3e9da689c38932c98be63494ad7b5b04b60001d2689e769c9adf5d23967d5252da2c564579468ec59cfc64b7e03af6d21d14887d7a10eb4e0b3c7eaf9656ed28debc82149b21a94a4965d38a5f6aae496eb6fac7fbb0377a1702bbe3426ed339c11beabc09888b475a2a232a71b4344702c0c3f38c12bcaef772139f868f945ae62f3abf1930aa4c48d1ee94a21b26612387b8ddaf71397054482417f995d639db7a09d7d3a5db29d5ff03bc8cf64ff1f4d723900b0f6fcb5cd2ea327923cf543fc679bde0c430316e358578e8f4c0fc7680b5ac4a696b8d5a92aca6e772ee2a9f862c002e933bac9980832d0d14b09f7e7c97fdf92dca8a074ce157a04de7d17374a2729b5e431377331891918c883e823c91c393dd23513d210d4e9656aeb9f2dd9410ad82b04faf20f468017a7685a093ad3729fc7d6595d9cf07fbb9c65e801c9d2702aca9652e839658c772750a387f6c5b63994bfd0e270ce27c0077e54a7f007f416ec94e25d8bb31b80fedeb9b2def26335cdbffaef59a598903241d9fac7b003901824440c47396537e99555c0be3e4a2151df02fe85426b36daac69d3ab7378f64ce7acb28ea2aa2ad94ae55879a13a7483d3ca19809828505b9d560bd7adec288fad138e7bf6ed1752da88c09a65dadbd4aaa22a4a6457ad17654a4f794973ce09ce5d348b86e0d79d7d1febd3e185bed239f4d01db9165a9840b802187f97e475c357d45a69929a7e6e679b8c270df28ffc8e9de3266fea6899d67872e24a8e0dc3ab5eadf635ca894c2bb69d60d7fc48b7d3cb58b3d2378d1d11185086125cc80dfa390fc1293f526aa618285955f214478c0d325d85e9c9fa3a7df093fe49652c7d342cbaa6dc7052df54c41690dc80b642d830c41af0a52c807e173587cb22268fc03bd7a947fd531322819e511144f8d80bf7409ed30a76801de108f065b6f5765f372b38161b7097d9802a2f7b6dee57d771c3c8975e765ca3033a3ac229f9756d4c28b36b94cbca634b106f17f37e2534c7745ee65d013d7b6c8bf8e4809837c01494850a132a9ec6dc534186650f13df750b203e3a3a35bf88511be2716f99e50ce6887b59b78d2b3b19308de02b91867782fbce8fc16bcf366a3351f5a6e0e7f471b6bcfa3001e1d23789e146e36dd6281b6d64ac33448e76a0490bb71250dbfd29841e408404db983018ef046981c84f8ac607bd69a1f140520e3a234cbbbd964c754d6db11e049a29d0d07f79ff8e08152010c81080b996cfefbbe17aa9511fb1dcc259ff36bb518ad54fa27b842bb0ba9345ade654ddf5f2c91ba897e1117184b7c624adfe77e39741852487d7e62e5eeb4095701787e075411c10504ce28b8d43c77517283a95666f078223e9cf13e4b9e5ad6ec592134ea0873c378b6c6d2dab477bb2c1fcbab7ad49c48297bd0280cb5fb67dfd4a3a554a8d6e12c2a2811f35fa881f93af61c2c59fb53c89197d3536e419afc365d82841bf3bf4b90d7c649a4ea918820534104608fb97d02a4c4c1ecd2244807a2b450693afa417f95c09f20be2cc1e186ffb5f1b45ace36d9514831d5ef9871c5a2f148db35c3ddc31e124457625042d184d449e36b2e99ee40636dee6432a91e7e5b8dd932ad7ac843ee0aa5cf4c61d196dcc3873b5450205f2e869df6a18874164e69e5c9db594eddafae5b054ff7d364ae4dd6334a4146ef66d80c05f2bee0ae1e0a7b1a2bb04a1d43b5c9274c4dc9e7151c1d252297ca7a67de8f4f69640701c32922e11e324a7bc2cb738c09147316460e580144382ac0c00b6bdfccfa304ab1d643f3ae2cfe83b96e6d04d567e7569d0b87cc5ea33654ab79370e5c396e99b85ee982ba9d54230a0cbb0e4130e754840343e60288181da4bf74fe694ee912df5c6a13358e4bfd25632f7c00b5cce6ccc6ca2c3848d047d472d46188ca7698bddd08bde91ef541d8fdcd4ec25c3358bbdf5b945e2b55f2db0b406e1f43b984c9e3944179ade0f9ac2310fe825fcc970c6ffc0010000c7f2a38cdda1823cbe6279834ac390432b8a1029eee345b0b75954eb07dd65041eb4e1b84f51453fcf4485082a1ff23b60e444da32df63c618dab16020b131c319c1e4046c132d5bab8d2d333c7ba4482e8368d6e82ab1e8ca466cd50eb23d94df3a02d99d8d9d8639fa80087c6126b40
664d22563bb73b23a463042eb1721102ed37c2b56e7d537f3b785e109c8a876c0a8122cab991f58fd59a2071281fbbdb9fb164e397576265d16cf7c6cc9cd96ae2d22c6aa11e34ea63f76b94838f05b648a310caff50b45b119fa3a7e646506ad3a77d04dd9c7ed692f2a930c14f80e248a02a98c9851fdeb774bf8ea72525f19f97b59cc89e7294bde549ad11541e62a2e68e4758f635141cf0376a3d8fa5d0abafe8659fbb9f1371d7ebe2f2d312557afa76f2376ffa0e0cce7084745ba97e643f958dd537f5e8c14e3bcba13bdd74197c22b1e5c714aabd0b7458af85db602f96b41f8323146c877c5a0e8817da93b16ed474000b1f24db0e4c872c3462b6ed920b22e9c0bd3d11ed7e43d207595aa74e593858faa7d931bede1e4a71bc2e469300ea01bc473a94578573332454f1c3e90062241d9907530ebd019f5a5a27ae5b24f0ecabda78bbdfb4f91af4c7aa2afe3e0c379b5e8f9c3a4b6f42b08d997fb71025a067480ae82d176006898ca5064cee7b51070b089ef1dc2beb6e93a3fc555b28da0a7c7452cac895913f4d43a64f1cb406078795641a4f5ce2afa01b880605fa581ec31bd0e3ca6392765feda52e8449ee5a2237c15ed0a9cc01d8543e68b7c5b4839352b9dc2a04d2afa19c496f7ff3ad28f18ab2b4e27d354f1cd7103c8d21344583bbb72cf78468af925a8a68649e37a6fefa942fb0f6478c5a9344b40dc2a86b1694ca29beca6d8c865df50926f0474dcdb1856260103305cd579ef4e1773e856c257457b0e93166a3584c96d9c21cd767001d678b5d3d87a73d285157931d42aa4d6e205f983578a84dd7cbf50dcac50f7a20cc577b1b76452de74d5479c362cef2e290590cd153b36ac1327c7ea2f3e68373363da3fa8de597262917981d5696d092cf62f44b973a33556138f2804c704dd15a9185327a9a3025c2b9bdd23af1eb9cdfaed2ebe865f8bea8d1055b0c69e4ab848ba0a9f2d13395122351e65d30c52a6a8fa1f94cf4210468f1814ad4625ac7696ac13a08914123fefc48921fbdaf700c6203d153a1cfcd9e83e6176959a0bbfe8735040e703e0a72ce0449d6ba70c27002b3d959e1c58dbbb63522a26913633aae332606faea8d8a970c384d268effb12e015c1079d6c3c1a6d74e43301930f8a0607f0cc6d2ff00b65cf53143fe7c67660375082b68abfcc557ae3c67755bbaa78bdc982f95efe2feeece78022167af00398264c0fc6c3085ad746b4c00b1c0bea37e8d8209fdd22c7e3aea3e7adb08b970c84f8fb970369b4bb5b66df9df5fc1025e95ced89e8a52259e6ac52c1d8653ac89fa14c52e53cee15920ba8e25e83ef1836971d5afcece817ca3175201c2f4923a3697e770739d8468e155cfb706967b108ce0f0866f81db8bb758c515800efc00775c379040b2f24019b511b9bf5944a3716f39d5d398ebf4da853a94765229bb63fc255c8ebcac5e7c0e8bcfa329ceb5b1be203e45717ab8b5f512ef8d4f80d2c7933449fd8e342529c7d943e23e635078f79a1340e2db2cd20ee736288", 0x1000}], 0x1) socket(0x15, 0x4, 0xf56) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000100)='./cgroup.cpu/syz0\x00', 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f00000002c0)='cpuacct.usage_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) sendmsg$NL80211_CMD_EXTERNAL_AUTH(r3, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)=ANY=[@ANYBLOB='4\x00\x00', @ANYRES16=0x0, @ANYBLOB="01002abd7000fddbdf257f00000008000300", @ANYRES32=0x0, @ANYBLOB="0c009900000000005c0000000a0034000202020202020000"], 0x34}, 0x1, 0x0, 0x0, 0x55cdcbd2fecf7229}, 0x55) r6 = socket(0x26, 0x6, 0x9) sendmsg$NL80211_CMD_SET_INTERFACE(r6, &(0x7f00000001c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x8000000}, 0xc, 0x0, 0x1, 0x0, 0x0, 0x200c0}, 0x90) 19:48:42 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, 
&(0x7f0000000380), 0x101bf) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r2, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000500)=ANY=[@ANYBLOB="3c00000010008506000000ff0100000000000000", @ANYRES32=r3, @ANYBLOB="00010000000000001c0012000c000100626f6e64"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000040)=@newqdisc={0x17c, 0x24, 0x400, 0x70bd2c, 0x25dfdbff, {0x0, 0x0, 0x0, 0x0, {0x9, 0xfff3}, {0xa, 0xa}, {0x1, 0xb}}, [@TCA_STAB={0xac, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x80, 0x1, 0x1, 0x200, 0x0, 0x6, 0x7, 0x9}}, {0x16, 0x2, [0xfc00, 0x0, 0xffff, 0x3, 0x0, 0xfff8, 0x9, 0x1, 0x2]}}, {{0x1c, 0x1, {0x0, 0x7, 0x1, 0x0, 0x0, 0x0, 0x2, 0x2}}, {0x8, 0x2, [0x6, 0x8]}}, {{0x1c, 0x1, {0x40, 0x92, 0x100, 0x7, 0x1, 0xfffffffe, 0x6, 0x2}}, {0x8, 0x2, [0x3, 0x5]}}, {{0x1c, 0x1, {0xea, 0x80, 0xffff, 0xe1e, 0x0, 0xd084, 0xfff, 0x5}}, {0xe, 0x2, [0x5b7, 0x7, 0x0, 0x0, 0x2]}}]}, @TCA_RATE={0x6, 0x5, {0x0, 0xd9}}, @TCA_RATE={0x6, 0x5, {0x3, 0x7f}}, @qdisc_kind_options=@q_pfifo_head_drop={{0x14}, {0x8, 0x2, 0x4}}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x9000}, @qdisc_kind_options=@q_plug={{0x9}, {0xc, 0x2, {0x0, 0x9}}}, @TCA_STAB={0x50, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x2, 0xa2, 0x14db, 0x4, 0x2, 0x0, 0xfff, 0x2}}, {0x8, 0x2, [0xffe0, 0xa2eb]}}, {{0x1c, 0x1, {0x4, 0x6, 0x95, 0x1f, 0x0, 0xfa, 0x9, 0x3}}, {0xa, 0x2, [0xffff, 0x0, 0x4]}}]}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x20}, @TCA_EGRESS_BLOCK={0x8}]}, 0x17c}, 0x1, 0x0, 0x0, 0x4008000}, 0x80) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) (async) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$nl_route_sched(r2, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) (async) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000500)=ANY=[@ANYBLOB="3c00000010008506000000ff0100000000000000", @ANYRES32=r3, @ANYBLOB="00010000000000001c0012000c000100626f6e64"], 0x3c}}, 0x0) (async) sendmsg$nl_route_sched(r1, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000040)=@newqdisc={0x17c, 0x24, 0x400, 0x70bd2c, 0x25dfdbff, {0x0, 0x0, 0x0, 0x0, {0x9, 0xfff3}, {0xa, 0xa}, {0x1, 0xb}}, [@TCA_STAB={0xac, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x80, 0x1, 0x1, 0x200, 0x0, 0x6, 0x7, 0x9}}, {0x16, 0x2, [0xfc00, 0x0, 0xffff, 0x3, 0x0, 0xfff8, 0x9, 0x1, 0x2]}}, {{0x1c, 0x1, {0x0, 0x7, 0x1, 0x0, 0x0, 0x0, 0x2, 0x2}}, {0x8, 0x2, [0x6, 0x8]}}, {{0x1c, 0x1, {0x40, 0x92, 0x100, 0x7, 0x1, 0xfffffffe, 0x6, 0x2}}, {0x8, 0x2, [0x3, 0x5]}}, {{0x1c, 0x1, {0xea, 0x80, 0xffff, 0xe1e, 0x0, 0xd084, 0xfff, 0x5}}, {0xe, 0x2, [0x5b7, 0x7, 0x0, 0x0, 0x2]}}]}, @TCA_RATE={0x6, 0x5, {0x0, 0xd9}}, @TCA_RATE={0x6, 0x5, {0x3, 0x7f}}, @qdisc_kind_options=@q_pfifo_head_drop={{0x14}, {0x8, 0x2, 0x4}}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x9000}, @qdisc_kind_options=@q_plug={{0x9}, {0xc, 0x2, {0x0, 0x9}}}, @TCA_STAB={0x50, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x2, 0xa2, 0x14db, 0x4, 0x2, 0x0, 0xfff, 0x2}}, {0x8, 0x2, [0xffe0, 0xa2eb]}}, {{0x1c, 0x1, {0x4, 
0x6, 0x95, 0x1f, 0x0, 0xfa, 0x9, 0x3}}, {0xa, 0x2, [0xffff, 0x0, 0x4]}}]}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x20}, @TCA_EGRESS_BLOCK={0x8}]}, 0x17c}, 0x1, 0x0, 0x0, 0x4008000}, 0x80) (async) [ 2728.024371][T28924] workqueue: Failed to create a rescuer kthread for wq "bond68": -EINTR [ 2728.051004][T28938] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 19:48:42 executing program 3: r0 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) sendmsg$inet6(r0, &(0x7f0000000080)={0x0, 0x0, 0x0}, 0x0) 19:48:42 executing program 3: r0 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) sendmsg$inet6(r0, &(0x7f0000000080)={0x0, 0x0, 0x0}, 0x0) 19:48:42 executing program 3: r0 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) sendmsg$inet6(r0, &(0x7f0000000080)={0x0, 0x0, 0x0}, 0x0) 19:48:42 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8f150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2728.222846][T28938] workqueue: Failed to create a rescuer kthread for wq "bond1237": -EINTR [ 2728.395995][T28943] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 19:48:42 executing program 3: r0 = epoll_create1(0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) epoll_ctl$EPOLL_CTL_ADD(r0, 0x1, r1, &(0x7f00000000c0)) 19:48:42 executing program 3: r0 = epoll_create1(0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) epoll_ctl$EPOLL_CTL_ADD(r0, 0x1, r1, &(0x7f00000000c0)) [ 2728.580181][T28943] bond1284: entered promiscuous mode [ 2728.639998][T28943] 8021q: adding VLAN 0 to HW filter on device bond1284 19:48:42 executing program 3: r0 = epoll_create1(0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) epoll_ctl$EPOLL_CTL_ADD(r0, 0x1, r1, &(0x7f00000000c0)) [ 2728.921104][T28944] bond1284: (slave bridge1213): making interface the new active one [ 2728.962277][T28944] bridge1213: entered promiscuous mode [ 2729.001767][T28944] bond1284: (slave bridge1213): Enslaving as an active interface with an up link 19:48:43 executing program 3: r0 = epoll_create1(0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) epoll_ctl$EPOLL_CTL_ADD(r0, 0x1, r1, &(0x7f00000000c0)) [ 2729.044351][T28951] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2729.256387][T28951] bond1165: entered promiscuous mode [ 2729.276277][T28951] 8021q: adding VLAN 0 to HW filter on device bond1165 [ 2729.303330][T28955] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.2'. [ 2729.326916][T28955] workqueue: Failed to create a rescuer kthread for wq "bond68": -EINTR [ 2729.463513][T28956] bond1165: (slave bridge1094): making interface the new active one [ 2729.483237][T28956] bridge1094: entered promiscuous mode [ 2729.497201][T28956] bond1165: (slave bridge1094): Enslaving as an active interface with an up link [ 2729.509033][T28971] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
19:48:43 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x6}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:43 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x95e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:43 executing program 3: r0 = epoll_create1(0x0) epoll_pwait(r0, &(0x7f0000000080)=[{}], 0x1, 0x96, 0x0, 0x0) 19:48:43 executing program 0: r0 = syz_init_net_socket$802154_dgram(0x24, 0x2, 0x0) writev(r0, 
&(0x7f0000000140)=[{&(0x7f0000000980)="28421cd4a4a21fac3d10c0a4200b622f6b19b22dbd8f8ddf28226f71861812feb045f7b9136580d9b28baebf9da57697e575f1036b2d903e02a6fe986bf663617377a3762d615399d8a9337cc7ba8ad5100de04a7d5192cf2ab62824c1663d84ae2d13d53d99f15085c3355a0b63c0f40afa7c1d82536183c6c57ee095a47c41fd439a232f21ad1359f060f0aeb10fa084891e3d0adf0a259af9873f1353f9dcb854ad0b25509961d7ea68740302bf35be61b3201ff730bc1b5ca7ae5e2399d0e61fa5242c3b3580aafb7470d6d3fa2ee4ce651c223ef57ed1246527e29d1dbe707124af812930dcc4d034e67e9b67cc7ed5f6793b3cf251b357f6d629fde119d90dc6444729a179a59f6b2c7784454496e368e7ff874bf8843a879a21662ada589610d9b2bb3777c01a3e789f2b62feec567d0987bdb74ac1dd2d7cef3aa8afb6b90b3107aec38c06abc4fd7c5a7fb4aba3f8a9689f4e68eebe839c77763bd3be7b11f2492ce6eb5b8359d4a725e6a4792315a435641e19fd4905a2469e6d43f54f32abf8cc3b9c887496af24e2e6279ede5f7ccd2eef4685c8a6fe56be2268a40e66888c369c90b28907777e2f8252139d5b6e7387144cd9a5e14e3c1b6f8cd277537cdfd562e119f4feddbf7bc71980055648c39b2087212dea844be93b0b372d926ae4971df6c01886963226337f41bb8f6615b6a2ceb08d57256cc340d838630f5fa3a984374893aa2b6f58a7ecb0e091af7cbf9146c697c0611b5a86e5cdbebdd2bef5dd34a96376986df7d625d6ea162a3d407dcc4ed454759f3ceebb4485027a28d7571870f06c3daa7733132b5b8a6e7910707e734b62d16cfcb8a7f7c8779d4c884c8b78af62a3a8d9a5a6ed71a9ce81d60c0f0dcaf4e0ecb92396e971dc2809b53dd0a5ab08a248bc3552b8e6539dd9880a3e42870c02a587445a9e41548ddd21c8c8f0b9456db8e19432768559e3d91e05c747dbaec9debcb4307f2773980865d79a100e00edb2fd1079864e94d5d0415929a797d594813cbdc45c1b28856970ae7a78821e99355b8ac094566b9c49efbcb7f048579aea2b03ebd3efc6b4b6ae1779b918015b282bf8fe39eb832f56d3659f93f041094f09288d0c5505beeab13f3c423a08973515896ab432dbe011b474af580e8b59358ab54697fa9997546d1d419d82bad31007096d903afe3db61a280b7067c6963a8c900c21f1a0d1971b8a161dde78d955b2404f8a60af77259880d1c60049840ad3b3fe3cfe4a5c8ab94ecd294c42940c4fb80fed677b64e6c1fffeaed228f8ddf16cec0cc3175a4b9c1511e90684895c253ceffd97ebbd2d2c8ab6867355bce4f3b552ae8777a210bf0a604aa0af55eec4602d5395cba8fc267ed5e315b1468b1f4005c9b8fa8ca20cdde19d3d47f97768635ba2bb98f6b458b733421e80755858c42ecffadef8d2e67f6f41f7bfd6c96c7d17892f5d3a3ccbfbf11e9a663e7797b8ca758d686305b8b47e9392cd01841622a992639dc8590b39616a2dbf53817120c198469cbf772eaa34c7dfa5bd806909f6022661ae4df63ccdcff4a8279650df42d1a09aa2e63a52709f1b9236eee27941ecb2a11c90296251aede409334dc970d0e48336e6c03cc2a343d98b67be0f889e85329889d08869da17ec2e2d0fa0bfd94efe1a43b3ed970ee1a10d36f100597479731916bbc1b885c4bd8f08515cc34b87534374579b70365e816fe95899aa3c35756f9791dcf182b3e809b0f115b4a9f30bdd55d3af72481ecb8f903c072f8eb1c55d315ad4a1f69aa40ec81bbc2019cf74876187c21a7ed878c3957ec8a3ab328deb716d8aec74aefc09e927d9dfd3efcc7669f13660b001f87193b87113e190a6338a45fe25b1669264e3d42db7ec4bbe906390785b1e489c7f5ae0b31b51704e3e6ae34e69a7c1f7990e98355dde5c8da185d656ff5e6d150d0528f4f192e54d0f0754bdae561e124e13f166f8696f2708cced066a48cb67575a8be464b3fe9284523e0b22538c096ce4030278f8382ea7df5bb4923205c51d3e9da689c38932c98be63494ad7b5b04b60001d2689e769c9adf5d23967d5252da2c564579468ec59cfc64b7e03af6d21d14887d7a10eb4e0b3c7eaf9656ed28debc82149b21a94a4965d38a5f6aae496eb6fac7fbb0377a1702bbe3426ed339c11beabc09888b475a2a232a71b4344702c0c3f38c12bcaef772139f868f945ae62f3abf1930aa4c48d1ee94a21b26612387b8ddaf71397054482417f995d639db7a09d7d3a5db29d5ff03bc8cf64ff1f4d723900b0f6fcb5cd2ea327923cf543fc679bde0c430316e358578e8f4c0fc7680b5ac4a696b8d5a92aca6e772ee2a9f862c002e933bac9980832d0d14b09f7e7c97fdf92dca8a074ce157a04de7d17374a2729b5e431377331891918c883e823c91c393dd23513d210d4e9656aeb9f2dd9410ad82b04faf20f468017a7685a093a
d3729fc7d6595d9cf07fbb9c65e801c9d2702aca9652e839658c772750a387f6c5b63994bfd0e270ce27c0077e54a7f007f416ec94e25d8bb31b80fedeb9b2def26335cdbffaef59a598903241d9fac7b003901824440c47396537e99555c0be3e4a2151df02fe85426b36daac69d3ab7378f64ce7acb28ea2aa2ad94ae55879a13a7483d3ca19809828505b9d560bd7adec288fad138e7bf6ed1752da88c09a65dadbd4aaa22a4a6457ad17654a4f794973ce09ce5d348b86e0d79d7d1febd3e185bed239f4d01db9165a9840b802187f97e475c357d45a69929a7e6e679b8c270df28ffc8e9de3266fea6899d67872e24a8e0dc3ab5eadf635ca894c2bb69d60d7fc48b7d3cb58b3d2378d1d11185086125cc80dfa390fc1293f526aa618285955f214478c0d325d85e9c9fa3a7df093fe49652c7d342cbaa6dc7052df54c41690dc80b642d830c41af0a52c807e173587cb22268fc03bd7a947fd531322819e511144f8d80bf7409ed30a76801de108f065b6f5765f372b38161b7097d9802a2f7b6dee57d771c3c8975e765ca3033a3ac229f9756d4c28b36b94cbca634b106f17f37e2534c7745ee65d013d7b6c8bf8e4809837c01494850a132a9ec6dc534186650f13df750b203e3a3a35bf88511be2716f99e50ce6887b59b78d2b3b19308de02b91867782fbce8fc16bcf366a3351f5a6e0e7f471b6bcfa3001e1d23789e146e36dd6281b6d64ac33448e76a0490bb71250dbfd29841e408404db983018ef046981c84f8ac607bd69a1f140520e3a234cbbbd964c754d6db11e049a29d0d07f79ff8e08152010c81080b996cfefbbe17aa9511fb1dcc259ff36bb518ad54fa27b842bb0ba9345ade654ddf5f2c91ba897e1117184b7c624adfe77e39741852487d7e62e5eeb4095701787e075411c10504ce28b8d43c77517283a95666f078223e9cf13e4b9e5ad6ec592134ea0873c378b6c6d2dab477bb2c1fcbab7ad49c48297bd0280cb5fb67dfd4a3a554a8d6e12c2a2811f35fa881f93af61c2c59fb53c89197d3536e419afc365d82841bf3bf4b90d7c649a4ea918820534104608fb97d02a4c4c1ecd2244807a2b450693afa417f95c09f20be2cc1e186ffb5f1b45ace36d9514831d5ef9871c5a2f148db35c3ddc31e124457625042d184d449e36b2e99ee40636dee6432a91e7e5b8dd932ad7ac843ee0aa5cf4c61d196dcc3873b5450205f2e869df6a18874164e69e5c9db594eddafae5b054ff7d364ae4dd6334a4146ef66d80c05f2bee0ae1e0a7b1a2bb04a1d43b5c9274c4dc9e7151c1d252297ca7a67de8f4f69640701c32922e11e324a7bc2cb738c09147316460e580144382ac0c00b6bdfccfa304ab1d643f3ae2cfe83b96e6d04d567e7569d0b87cc5ea33654ab79370e5c396e99b85ee982ba9d54230a0cbb0e4130e754840343e60288181da4bf74fe694ee912df5c6a13358e4bfd25632f7c00b5cce6ccc6ca2c3848d047d472d46188ca7698bddd08bde91ef541d8fdcd4ec25c3358bbdf5b945e2b55f2db0b406e1f43b984c9e3944179ade0f9ac2310fe825fcc970c6ffc0010000c7f2a38cdda1823cbe6279834ac390432b8a1029eee345b0b75954eb07dd65041eb4e1b84f51453fcf4485082a1ff23b60e444da32df63c618dab16020b131c319c1e4046c132d5bab8d2d333c7ba4482e8368d6e82ab1e8ca466cd50eb23d94df3a02d99d8d9d8639fa80087c6126b40664d22563bb73b23a463042eb1721102ed37c2b56e7d537f3b785e109c8a876c0a8122cab991f58fd59a2071281fbbdb9fb164e397576265d16cf7c6cc9cd96ae2d22c6aa11e34ea63f76b94838f05b648a310caff50b45b119fa3a7e646506ad3a77d04dd9c7ed692f2a930c14f80e248a02a98c9851fdeb774bf8ea72525f19f97b59cc89e7294bde549ad11541e62a2e68e4758f635141cf0376a3d8fa5d0abafe8659fbb9f1371d7ebe2f2d312557afa76f2376ffa0e0cce7084745ba97e643f958dd537f5e8c14e3bcba13bdd74197c22b1e5c714aabd0b7458af85db602f96b41f8323146c877c5a0e8817da93b16ed474000b1f24db0e4c872c3462b6ed920b22e9c0bd3d11ed7e43d207595aa74e593858faa7d931bede1e4a71bc2e469300ea01bc473a94578573332454f1c3e90062241d9907530ebd019f5a5a27ae5b24f0ecabda78bbdfb4f91af4c7aa2afe3e0c379b5e8f9c3a4b6f42b08d997fb71025a067480ae82d176006898ca5064cee7b51070b089ef1dc2beb6e93a3fc555b28da0a7c7452cac895913f4d43a64f1cb406078795641a4f5ce2afa01b880605fa581ec31bd0e3ca6392765feda52e8449ee5a2237c15ed0a9cc01d8543e68b7c5b4839352b9dc2a04d2afa19c496f7ff3ad28f18ab2b4e27d354f1cd7103c8d21344583bbb72cf78468af925a8a68649e37a6fefa942fb0f6478c5a9344b40dc2a86b1694ca29beca6d8c865df50926f0474dcdb18562
60103305cd579ef4e1773e856c257457b0e93166a3584c96d9c21cd767001d678b5d3d87a73d285157931d42aa4d6e205f983578a84dd7cbf50dcac50f7a20cc577b1b76452de74d5479c362cef2e290590cd153b36ac1327c7ea2f3e68373363da3fa8de597262917981d5696d092cf62f44b973a33556138f2804c704dd15a9185327a9a3025c2b9bdd23af1eb9cdfaed2ebe865f8bea8d1055b0c69e4ab848ba0a9f2d13395122351e65d30c52a6a8fa1f94cf4210468f1814ad4625ac7696ac13a08914123fefc48921fbdaf700c6203d153a1cfcd9e83e6176959a0bbfe8735040e703e0a72ce0449d6ba70c27002b3d959e1c58dbbb63522a26913633aae332606faea8d8a970c384d268effb12e015c1079d6c3c1a6d74e43301930f8a0607f0cc6d2ff00b65cf53143fe7c67660375082b68abfcc557ae3c67755bbaa78bdc982f95efe2feeece78022167af00398264c0fc6c3085ad746b4c00b1c0bea37e8d8209fdd22c7e3aea3e7adb08b970c84f8fb970369b4bb5b66df9df5fc1025e95ced89e8a52259e6ac52c1d8653ac89fa14c52e53cee15920ba8e25e83ef1836971d5afcece817ca3175201c2f4923a3697e770739d8468e155cfb706967b108ce0f0866f81db8bb758c515800efc00775c379040b2f24019b511b9bf5944a3716f39d5d398ebf4da853a94765229bb63fc255c8ebcac5e7c0e8bcfa329ceb5b1be203e45717ab8b5f512ef8d4f80d2c7933449fd8e342529c7d943e23e635078f79a1340e2db2cd20ee736288", 0x1000}], 0x1) socket(0x15, 0x4, 0xf56) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000100)='./cgroup.cpu/syz0\x00', 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f00000002c0)='cpuacct.usage_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) sendmsg$NL80211_CMD_EXTERNAL_AUTH(r3, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)=ANY=[@ANYBLOB='4\x00\x00', @ANYRES16=0x0, @ANYBLOB="01002abd7000fddbdf257f00000008000300", @ANYRES32=0x0, @ANYBLOB="0c009900000000005c0000000a0034000202020202020000"], 0x34}, 0x1, 0x0, 0x0, 0x55cdcbd2fecf7229}, 0x55) r6 = socket(0x26, 0x6, 0x9) sendmsg$NL80211_CMD_SET_INTERFACE(r6, &(0x7f00000001c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x8000000}, 0xc, 0x0, 0x1, 0x0, 0x0, 0x200c0}, 0x90) syz_init_net_socket$802154_dgram(0x24, 0x2, 0x0) (async) writev(r0, 
&(0x7f0000000140)=[{&(0x7f0000000980)="28421cd4a4a21fac3d10c0a4200b622f6b19b22dbd8f8ddf28226f71861812feb045f7b9136580d9b28baebf9da57697e575f1036b2d903e02a6fe986bf663617377a3762d615399d8a9337cc7ba8ad5100de04a7d5192cf2ab62824c1663d84ae2d13d53d99f15085c3355a0b63c0f40afa7c1d82536183c6c57ee095a47c41fd439a232f21ad1359f060f0aeb10fa084891e3d0adf0a259af9873f1353f9dcb854ad0b25509961d7ea68740302bf35be61b3201ff730bc1b5ca7ae5e2399d0e61fa5242c3b3580aafb7470d6d3fa2ee4ce651c223ef57ed1246527e29d1dbe707124af812930dcc4d034e67e9b67cc7ed5f6793b3cf251b357f6d629fde119d90dc6444729a179a59f6b2c7784454496e368e7ff874bf8843a879a21662ada589610d9b2bb3777c01a3e789f2b62feec567d0987bdb74ac1dd2d7cef3aa8afb6b90b3107aec38c06abc4fd7c5a7fb4aba3f8a9689f4e68eebe839c77763bd3be7b11f2492ce6eb5b8359d4a725e6a4792315a435641e19fd4905a2469e6d43f54f32abf8cc3b9c887496af24e2e6279ede5f7ccd2eef4685c8a6fe56be2268a40e66888c369c90b28907777e2f8252139d5b6e7387144cd9a5e14e3c1b6f8cd277537cdfd562e119f4feddbf7bc71980055648c39b2087212dea844be93b0b372d926ae4971df6c01886963226337f41bb8f6615b6a2ceb08d57256cc340d838630f5fa3a984374893aa2b6f58a7ecb0e091af7cbf9146c697c0611b5a86e5cdbebdd2bef5dd34a96376986df7d625d6ea162a3d407dcc4ed454759f3ceebb4485027a28d7571870f06c3daa7733132b5b8a6e7910707e734b62d16cfcb8a7f7c8779d4c884c8b78af62a3a8d9a5a6ed71a9ce81d60c0f0dcaf4e0ecb92396e971dc2809b53dd0a5ab08a248bc3552b8e6539dd9880a3e42870c02a587445a9e41548ddd21c8c8f0b9456db8e19432768559e3d91e05c747dbaec9debcb4307f2773980865d79a100e00edb2fd1079864e94d5d0415929a797d594813cbdc45c1b28856970ae7a78821e99355b8ac094566b9c49efbcb7f048579aea2b03ebd3efc6b4b6ae1779b918015b282bf8fe39eb832f56d3659f93f041094f09288d0c5505beeab13f3c423a08973515896ab432dbe011b474af580e8b59358ab54697fa9997546d1d419d82bad31007096d903afe3db61a280b7067c6963a8c900c21f1a0d1971b8a161dde78d955b2404f8a60af77259880d1c60049840ad3b3fe3cfe4a5c8ab94ecd294c42940c4fb80fed677b64e6c1fffeaed228f8ddf16cec0cc3175a4b9c1511e90684895c253ceffd97ebbd2d2c8ab6867355bce4f3b552ae8777a210bf0a604aa0af55eec4602d5395cba8fc267ed5e315b1468b1f4005c9b8fa8ca20cdde19d3d47f97768635ba2bb98f6b458b733421e80755858c42ecffadef8d2e67f6f41f7bfd6c96c7d17892f5d3a3ccbfbf11e9a663e7797b8ca758d686305b8b47e9392cd01841622a992639dc8590b39616a2dbf53817120c198469cbf772eaa34c7dfa5bd806909f6022661ae4df63ccdcff4a8279650df42d1a09aa2e63a52709f1b9236eee27941ecb2a11c90296251aede409334dc970d0e48336e6c03cc2a343d98b67be0f889e85329889d08869da17ec2e2d0fa0bfd94efe1a43b3ed970ee1a10d36f100597479731916bbc1b885c4bd8f08515cc34b87534374579b70365e816fe95899aa3c35756f9791dcf182b3e809b0f115b4a9f30bdd55d3af72481ecb8f903c072f8eb1c55d315ad4a1f69aa40ec81bbc2019cf74876187c21a7ed878c3957ec8a3ab328deb716d8aec74aefc09e927d9dfd3efcc7669f13660b001f87193b87113e190a6338a45fe25b1669264e3d42db7ec4bbe906390785b1e489c7f5ae0b31b51704e3e6ae34e69a7c1f7990e98355dde5c8da185d656ff5e6d150d0528f4f192e54d0f0754bdae561e124e13f166f8696f2708cced066a48cb67575a8be464b3fe9284523e0b22538c096ce4030278f8382ea7df5bb4923205c51d3e9da689c38932c98be63494ad7b5b04b60001d2689e769c9adf5d23967d5252da2c564579468ec59cfc64b7e03af6d21d14887d7a10eb4e0b3c7eaf9656ed28debc82149b21a94a4965d38a5f6aae496eb6fac7fbb0377a1702bbe3426ed339c11beabc09888b475a2a232a71b4344702c0c3f38c12bcaef772139f868f945ae62f3abf1930aa4c48d1ee94a21b26612387b8ddaf71397054482417f995d639db7a09d7d3a5db29d5ff03bc8cf64ff1f4d723900b0f6fcb5cd2ea327923cf543fc679bde0c430316e358578e8f4c0fc7680b5ac4a696b8d5a92aca6e772ee2a9f862c002e933bac9980832d0d14b09f7e7c97fdf92dca8a074ce157a04de7d17374a2729b5e431377331891918c883e823c91c393dd23513d210d4e9656aeb9f2dd9410ad82b04faf20f468017a7685a093a
d3729fc7d6595d9cf07fbb9c65e801c9d2702aca9652e839658c772750a387f6c5b63994bfd0e270ce27c0077e54a7f007f416ec94e25d8bb31b80fedeb9b2def26335cdbffaef59a598903241d9fac7b003901824440c47396537e99555c0be3e4a2151df02fe85426b36daac69d3ab7378f64ce7acb28ea2aa2ad94ae55879a13a7483d3ca19809828505b9d560bd7adec288fad138e7bf6ed1752da88c09a65dadbd4aaa22a4a6457ad17654a4f794973ce09ce5d348b86e0d79d7d1febd3e185bed239f4d01db9165a9840b802187f97e475c357d45a69929a7e6e679b8c270df28ffc8e9de3266fea6899d67872e24a8e0dc3ab5eadf635ca894c2bb69d60d7fc48b7d3cb58b3d2378d1d11185086125cc80dfa390fc1293f526aa618285955f214478c0d325d85e9c9fa3a7df093fe49652c7d342cbaa6dc7052df54c41690dc80b642d830c41af0a52c807e173587cb22268fc03bd7a947fd531322819e511144f8d80bf7409ed30a76801de108f065b6f5765f372b38161b7097d9802a2f7b6dee57d771c3c8975e765ca3033a3ac229f9756d4c28b36b94cbca634b106f17f37e2534c7745ee65d013d7b6c8bf8e4809837c01494850a132a9ec6dc534186650f13df750b203e3a3a35bf88511be2716f99e50ce6887b59b78d2b3b19308de02b91867782fbce8fc16bcf366a3351f5a6e0e7f471b6bcfa3001e1d23789e146e36dd6281b6d64ac33448e76a0490bb71250dbfd29841e408404db983018ef046981c84f8ac607bd69a1f140520e3a234cbbbd964c754d6db11e049a29d0d07f79ff8e08152010c81080b996cfefbbe17aa9511fb1dcc259ff36bb518ad54fa27b842bb0ba9345ade654ddf5f2c91ba897e1117184b7c624adfe77e39741852487d7e62e5eeb4095701787e075411c10504ce28b8d43c77517283a95666f078223e9cf13e4b9e5ad6ec592134ea0873c378b6c6d2dab477bb2c1fcbab7ad49c48297bd0280cb5fb67dfd4a3a554a8d6e12c2a2811f35fa881f93af61c2c59fb53c89197d3536e419afc365d82841bf3bf4b90d7c649a4ea918820534104608fb97d02a4c4c1ecd2244807a2b450693afa417f95c09f20be2cc1e186ffb5f1b45ace36d9514831d5ef9871c5a2f148db35c3ddc31e124457625042d184d449e36b2e99ee40636dee6432a91e7e5b8dd932ad7ac843ee0aa5cf4c61d196dcc3873b5450205f2e869df6a18874164e69e5c9db594eddafae5b054ff7d364ae4dd6334a4146ef66d80c05f2bee0ae1e0a7b1a2bb04a1d43b5c9274c4dc9e7151c1d252297ca7a67de8f4f69640701c32922e11e324a7bc2cb738c09147316460e580144382ac0c00b6bdfccfa304ab1d643f3ae2cfe83b96e6d04d567e7569d0b87cc5ea33654ab79370e5c396e99b85ee982ba9d54230a0cbb0e4130e754840343e60288181da4bf74fe694ee912df5c6a13358e4bfd25632f7c00b5cce6ccc6ca2c3848d047d472d46188ca7698bddd08bde91ef541d8fdcd4ec25c3358bbdf5b945e2b55f2db0b406e1f43b984c9e3944179ade0f9ac2310fe825fcc970c6ffc0010000c7f2a38cdda1823cbe6279834ac390432b8a1029eee345b0b75954eb07dd65041eb4e1b84f51453fcf4485082a1ff23b60e444da32df63c618dab16020b131c319c1e4046c132d5bab8d2d333c7ba4482e8368d6e82ab1e8ca466cd50eb23d94df3a02d99d8d9d8639fa80087c6126b40664d22563bb73b23a463042eb1721102ed37c2b56e7d537f3b785e109c8a876c0a8122cab991f58fd59a2071281fbbdb9fb164e397576265d16cf7c6cc9cd96ae2d22c6aa11e34ea63f76b94838f05b648a310caff50b45b119fa3a7e646506ad3a77d04dd9c7ed692f2a930c14f80e248a02a98c9851fdeb774bf8ea72525f19f97b59cc89e7294bde549ad11541e62a2e68e4758f635141cf0376a3d8fa5d0abafe8659fbb9f1371d7ebe2f2d312557afa76f2376ffa0e0cce7084745ba97e643f958dd537f5e8c14e3bcba13bdd74197c22b1e5c714aabd0b7458af85db602f96b41f8323146c877c5a0e8817da93b16ed474000b1f24db0e4c872c3462b6ed920b22e9c0bd3d11ed7e43d207595aa74e593858faa7d931bede1e4a71bc2e469300ea01bc473a94578573332454f1c3e90062241d9907530ebd019f5a5a27ae5b24f0ecabda78bbdfb4f91af4c7aa2afe3e0c379b5e8f9c3a4b6f42b08d997fb71025a067480ae82d176006898ca5064cee7b51070b089ef1dc2beb6e93a3fc555b28da0a7c7452cac895913f4d43a64f1cb406078795641a4f5ce2afa01b880605fa581ec31bd0e3ca6392765feda52e8449ee5a2237c15ed0a9cc01d8543e68b7c5b4839352b9dc2a04d2afa19c496f7ff3ad28f18ab2b4e27d354f1cd7103c8d21344583bbb72cf78468af925a8a68649e37a6fefa942fb0f6478c5a9344b40dc2a86b1694ca29beca6d8c865df50926f0474dcdb18562
60103305cd579ef4e1773e856c257457b0e93166a3584c96d9c21cd767001d678b5d3d87a73d285157931d42aa4d6e205f983578a84dd7cbf50dcac50f7a20cc577b1b76452de74d5479c362cef2e290590cd153b36ac1327c7ea2f3e68373363da3fa8de597262917981d5696d092cf62f44b973a33556138f2804c704dd15a9185327a9a3025c2b9bdd23af1eb9cdfaed2ebe865f8bea8d1055b0c69e4ab848ba0a9f2d13395122351e65d30c52a6a8fa1f94cf4210468f1814ad4625ac7696ac13a08914123fefc48921fbdaf700c6203d153a1cfcd9e83e6176959a0bbfe8735040e703e0a72ce0449d6ba70c27002b3d959e1c58dbbb63522a26913633aae332606faea8d8a970c384d268effb12e015c1079d6c3c1a6d74e43301930f8a0607f0cc6d2ff00b65cf53143fe7c67660375082b68abfcc557ae3c67755bbaa78bdc982f95efe2feeece78022167af00398264c0fc6c3085ad746b4c00b1c0bea37e8d8209fdd22c7e3aea3e7adb08b970c84f8fb970369b4bb5b66df9df5fc1025e95ced89e8a52259e6ac52c1d8653ac89fa14c52e53cee15920ba8e25e83ef1836971d5afcece817ca3175201c2f4923a3697e770739d8468e155cfb706967b108ce0f0866f81db8bb758c515800efc00775c379040b2f24019b511b9bf5944a3716f39d5d398ebf4da853a94765229bb63fc255c8ebcac5e7c0e8bcfa329ceb5b1be203e45717ab8b5f512ef8d4f80d2c7933449fd8e342529c7d943e23e635078f79a1340e2db2cd20ee736288", 0x1000}], 0x1) (async) socket(0x15, 0x4, 0xf56) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000100)='./cgroup.cpu/syz0\x00', 0x200002, 0x0) (async) openat$cgroup_ro(r1, &(0x7f00000002c0)='cpuacct.usage_sys\x00', 0x275a, 0x0) (async) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00'}) (async) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_EXTERNAL_AUTH(r3, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)=ANY=[@ANYBLOB='4\x00\x00', @ANYRES16=0x0, @ANYBLOB="01002abd7000fddbdf257f00000008000300", @ANYRES32=0x0, @ANYBLOB="0c009900000000005c0000000a0034000202020202020000"], 0x34}, 0x1, 0x0, 0x0, 0x55cdcbd2fecf7229}, 0x55) (async) socket(0x26, 0x6, 0x9) (async) sendmsg$NL80211_CMD_SET_INTERFACE(r6, &(0x7f00000001c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x8000000}, 0xc, 0x0, 0x1, 0x0, 0x0, 0x200c0}, 0x90) (async) 19:48:43 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket(0x10, 0x803, 0x0) sendmsg$nl_route_sched(r2, &(0x7f0000000740)={0x0, 0x0, &(0x7f0000000780)={0x0, 0x54}}, 0x0) getsockname$packet(r2, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000500)=ANY=[@ANYBLOB="3c00000010008506000000ff0100000000000000", @ANYRES32=r3, @ANYBLOB="00010000000000001c0012000c000100626f6e64"], 0x3c}}, 0x0) (async) sendmsg$nl_route_sched(r1, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000040)=@newqdisc={0x17c, 0x24, 0x400, 0x70bd2c, 0x25dfdbff, {0x0, 0x0, 0x0, 0x0, {0x9, 0xfff3}, {0xa, 0xa}, {0x1, 0xb}}, [@TCA_STAB={0xac, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x80, 0x1, 0x1, 0x200, 0x0, 0x6, 0x7, 0x9}}, {0x16, 0x2, [0xfc00, 0x0, 0xffff, 0x3, 0x0, 0xfff8, 0x9, 0x1, 0x2]}}, {{0x1c, 0x1, {0x0, 0x7, 0x1, 0x0, 0x0, 0x0, 0x2, 0x2}}, 
{0x8, 0x2, [0x6, 0x8]}}, {{0x1c, 0x1, {0x40, 0x92, 0x100, 0x7, 0x1, 0xfffffffe, 0x6, 0x2}}, {0x8, 0x2, [0x3, 0x5]}}, {{0x1c, 0x1, {0xea, 0x80, 0xffff, 0xe1e, 0x0, 0xd084, 0xfff, 0x5}}, {0xe, 0x2, [0x5b7, 0x7, 0x0, 0x0, 0x2]}}]}, @TCA_RATE={0x6, 0x5, {0x0, 0xd9}}, @TCA_RATE={0x6, 0x5, {0x3, 0x7f}}, @qdisc_kind_options=@q_pfifo_head_drop={{0x14}, {0x8, 0x2, 0x4}}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x9000}, @qdisc_kind_options=@q_plug={{0x9}, {0xc, 0x2, {0x0, 0x9}}}, @TCA_STAB={0x50, 0x8, 0x0, 0x1, [{{0x1c, 0x1, {0x2, 0xa2, 0x14db, 0x4, 0x2, 0x0, 0xfff, 0x2}}, {0x8, 0x2, [0xffe0, 0xa2eb]}}, {{0x1c, 0x1, {0x4, 0x6, 0x95, 0x1f, 0x0, 0xfa, 0x9, 0x3}}, {0xa, 0x2, [0xffff, 0x0, 0x4]}}]}, @TCA_INGRESS_BLOCK={0x8, 0xd, 0x20}, @TCA_EGRESS_BLOCK={0x8}]}, 0x17c}, 0x1, 0x0, 0x0, 0x4008000}, 0x80) 19:48:43 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000140)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r2, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r3}, @void}}}, 0x1c}}, 0x0) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), r0) sendmsg$NL80211_CMD_DISCONNECT(r1, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x24, r4, 0x400, 0x70bd26, 0x25dfdbfd, {{}, {@void, @void}}, [@NL80211_ATTR_REASON_CODE={0x6, 0x36, 0x30}, @NL80211_ATTR_REASON_CODE={0x6, 0x36, 0x1a}]}, 0x24}, 0x1, 0x0, 0x0, 0x8040}, 0x20000000) 19:48:43 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x90150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:43 executing program 3: r0 = epoll_create1(0x0) epoll_pwait(r0, &(0x7f0000000080)=[{}], 0x1, 0x96, 0x0, 0x0) [ 2729.550119][T28971] workqueue: Failed to create a rescuer kthread for wq "bond1237": -EINTR [ 2729.746378][T28991] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2729.878014][T28991] bond1285: entered promiscuous mode [ 2729.884327][T28991] 8021q: adding VLAN 0 to HW filter on device bond1285 19:48:43 executing program 0: r0 = syz_init_net_socket$802154_dgram(0x24, 0x2, 0x0) writev(r0, &(0x7f0000000140)=[{&(0x7f0000000980)="28421cd4a4a21fac3d10c0a4200b622f6b19b22dbd8f8ddf28226f71861812feb045f7b9136580d9b28baebf9da57697e575f1036b2d903e02a6fe986bf663617377a3762d615399d8a9337cc7ba8ad5100de04a7d5192cf2ab62824c1663d84ae2d13d53d99f15085c3355a0b63c0f40afa7c1d82536183c6c57ee095a47c41fd439a232f21ad1359f060f0aeb10fa084891e3d0adf0a259af9873f1353f9dcb854ad0b25509961d7ea68740302bf35be61b3201ff730bc1b5ca7ae5e2399d0e61fa5242c3b3580aafb7470d6d3fa2ee4ce651c223ef57ed1246527e29d1dbe707124af812930dcc4d034e67e9b67cc7ed5f6793b3cf251b357f6d629fde119d90dc6444729a179a59f6b2c7784454496e368e7ff874bf8843a879a21662ada589610d9b2bb3777c01a3e789f2b62feec567d0987bdb74ac1dd2d7cef3aa8afb6b90b3107aec38c06abc4fd7c5a7fb4aba3f8a9689f4e68eebe839c77763bd3be7b11f2492ce6eb5b8359d4a725e6a4792315a435641e19fd4905a2469e6d43f54f32abf8cc3b9c887496af24e2e6279ede5f7ccd2eef4685c8a6fe56be2268a40e66888c369c90b28907777e2f8252139d5b6e7387144cd9a5e14e3c1b6f8cd277537cdfd562e119f4feddbf7bc71980055648c39b2087212dea844be93b0b372d926ae4971df6c01886963226337f41bb8f6615b6a2ceb08d57256cc340d838630f5fa3a984374893aa2b6f58a7ecb0e091af7cbf9146c697c0611b5a86e5cdbebdd2bef5dd34a96376986df7d625d6ea162a3d407dcc4ed454759f3ceebb4485027a28d7571870f06c3daa7733132b5b8a6e7910707e734b62d16cfcb8a7f7c8779d4c884c8b78af62a3a8d9a5a6ed71a9ce81d60c0f0dcaf4e0ecb92396e971dc2809b53dd0a5ab08a248bc3552b8e6539dd9880a3e42870c02a587445a9e41548ddd21c8c8f0b9456db8e19432768559e3d91e05c747dbaec9debcb4307f2773980865d79a100e00edb2fd1079864e94d5d0415929a797d594813cbdc45c1b28856970ae7a78821e99355b8ac094566b9c49efbcb7f048579aea2b03ebd3efc6b4b6ae1779b918015b282bf8fe39eb832f56d3659f93f041094f09288d0c5505beeab13f3c423a08973515896ab432dbe011b474af580e8b59358ab54697fa9997546d1d419d82bad31007096d903afe3db61a280b7067c6963a8c900c21f1a0d1971b8a161dde78d955b2404f8a60af77259880d1c60049840ad3b3fe3cfe4a5c8ab94ecd294c42940c4fb80fed677b64e6c1fffeaed228f8ddf16cec0cc3175a4b9c1511e90684895c253ceffd97ebbd2d2c8ab6867355bce4f3b552ae8777a210bf0a604aa0af55eec4602d5395cba8fc267ed5e315b1468b1f4005c9b8fa8ca20cdde19d3d47f97768635ba2bb98f6b458b733421e80755858c42ecffadef8d2e67f6f41f7bfd6c96c7d17892f5d3a3ccbfbf11e9a663e7797b8ca758d686305b8b47e9392cd01841622a992639dc8590b39616a2dbf53817120c198469cbf772eaa34c7dfa5bd806909f6022661ae4df63ccdcff4a8279650df42d1a09aa2e63a52709f1b9236eee27941ecb2a11c90296251aede409334dc970d0e48336e6c03cc2a343d98b67be0f889e85329889d08869da17ec2e2d0fa0bfd94efe1a43b3ed970ee1a10d36f100597479731916bbc1b885c4bd8f08515cc34b87534374579b70365e816fe95899aa3c35756f9791dcf182b3e809b0f115b4a9f30bdd55d3af72481ecb8f903c072f8eb1c55d315ad4a1f69aa40ec81bbc2019cf74876187c21a7ed878c3957ec8a3ab328deb716d8aec74aefc09e927d9dfd3efcc7669f13660b001f87193b87113e190a6338a45fe25b1669264e3d42db7ec4bbe906390785b1e489c7f5ae0b31b51704e3e6ae34e69a7c1f7990e98355dde5c8da185d656ff5e6d150d0528f4f192e54d0f0754bdae561e124e13f166f8696f2708cced066a48cb67575a8be464b3fe9284523e0b22538c096ce4030278f8382ea7df5bb4923205c51d3e9da689c38932c98be63494ad7b5b04b60001d2689e769c9adf5d23967d5252da2c564579468ec59cfc64b7e03af6d21d14887d7a10eb4e0b3c7eaf9656ed28debc82149b21a94a4965d38a5f6aae496eb6fac7fbb0377a1702bbe3426ed339c11beabc09888b475a2a232a71b4344702c0c3f38c12bcaef772139f868f945ae62f3abf1930aa4c48d1ee94a21b26612387b8ddaf71397054482417f995d639db7a09d7d3a5db29d5ff03bc8cf64ff1f4d723900b0f6fcb5cd2ea327923cf54
3fc679bde0c430316e358578e8f4c0fc7680b5ac4a696b8d5a92aca6e772ee2a9f862c002e933bac9980832d0d14b09f7e7c97fdf92dca8a074ce157a04de7d17374a2729b5e431377331891918c883e823c91c393dd23513d210d4e9656aeb9f2dd9410ad82b04faf20f468017a7685a093ad3729fc7d6595d9cf07fbb9c65e801c9d2702aca9652e839658c772750a387f6c5b63994bfd0e270ce27c0077e54a7f007f416ec94e25d8bb31b80fedeb9b2def26335cdbffaef59a598903241d9fac7b003901824440c47396537e99555c0be3e4a2151df02fe85426b36daac69d3ab7378f64ce7acb28ea2aa2ad94ae55879a13a7483d3ca19809828505b9d560bd7adec288fad138e7bf6ed1752da88c09a65dadbd4aaa22a4a6457ad17654a4f794973ce09ce5d348b86e0d79d7d1febd3e185bed239f4d01db9165a9840b802187f97e475c357d45a69929a7e6e679b8c270df28ffc8e9de3266fea6899d67872e24a8e0dc3ab5eadf635ca894c2bb69d60d7fc48b7d3cb58b3d2378d1d11185086125cc80dfa390fc1293f526aa618285955f214478c0d325d85e9c9fa3a7df093fe49652c7d342cbaa6dc7052df54c41690dc80b642d830c41af0a52c807e173587cb22268fc03bd7a947fd531322819e511144f8d80bf7409ed30a76801de108f065b6f5765f372b38161b7097d9802a2f7b6dee57d771c3c8975e765ca3033a3ac229f9756d4c28b36b94cbca634b106f17f37e2534c7745ee65d013d7b6c8bf8e4809837c01494850a132a9ec6dc534186650f13df750b203e3a3a35bf88511be2716f99e50ce6887b59b78d2b3b19308de02b91867782fbce8fc16bcf366a3351f5a6e0e7f471b6bcfa3001e1d23789e146e36dd6281b6d64ac33448e76a0490bb71250dbfd29841e408404db983018ef046981c84f8ac607bd69a1f140520e3a234cbbbd964c754d6db11e049a29d0d07f79ff8e08152010c81080b996cfefbbe17aa9511fb1dcc259ff36bb518ad54fa27b842bb0ba9345ade654ddf5f2c91ba897e1117184b7c624adfe77e39741852487d7e62e5eeb4095701787e075411c10504ce28b8d43c77517283a95666f078223e9cf13e4b9e5ad6ec592134ea0873c378b6c6d2dab477bb2c1fcbab7ad49c48297bd0280cb5fb67dfd4a3a554a8d6e12c2a2811f35fa881f93af61c2c59fb53c89197d3536e419afc365d82841bf3bf4b90d7c649a4ea918820534104608fb97d02a4c4c1ecd2244807a2b450693afa417f95c09f20be2cc1e186ffb5f1b45ace36d9514831d5ef9871c5a2f148db35c3ddc31e124457625042d184d449e36b2e99ee40636dee6432a91e7e5b8dd932ad7ac843ee0aa5cf4c61d196dcc3873b5450205f2e869df6a18874164e69e5c9db594eddafae5b054ff7d364ae4dd6334a4146ef66d80c05f2bee0ae1e0a7b1a2bb04a1d43b5c9274c4dc9e7151c1d252297ca7a67de8f4f69640701c32922e11e324a7bc2cb738c09147316460e580144382ac0c00b6bdfccfa304ab1d643f3ae2cfe83b96e6d04d567e7569d0b87cc5ea33654ab79370e5c396e99b85ee982ba9d54230a0cbb0e4130e754840343e60288181da4bf74fe694ee912df5c6a13358e4bfd25632f7c00b5cce6ccc6ca2c3848d047d472d46188ca7698bddd08bde91ef541d8fdcd4ec25c3358bbdf5b945e2b55f2db0b406e1f43b984c9e3944179ade0f9ac2310fe825fcc970c6ffc0010000c7f2a38cdda1823cbe6279834ac390432b8a1029eee345b0b75954eb07dd65041eb4e1b84f51453fcf4485082a1ff23b60e444da32df63c618dab16020b131c319c1e4046c132d5bab8d2d333c7ba4482e8368d6e82ab1e8ca466cd50eb23d94df3a02d99d8d9d8639fa80087c6126b40664d22563bb73b23a463042eb1721102ed37c2b56e7d537f3b785e109c8a876c0a8122cab991f58fd59a2071281fbbdb9fb164e397576265d16cf7c6cc9cd96ae2d22c6aa11e34ea63f76b94838f05b648a310caff50b45b119fa3a7e646506ad3a77d04dd9c7ed692f2a930c14f80e248a02a98c9851fdeb774bf8ea72525f19f97b59cc89e7294bde549ad11541e62a2e68e4758f635141cf0376a3d8fa5d0abafe8659fbb9f1371d7ebe2f2d312557afa76f2376ffa0e0cce7084745ba97e643f958dd537f5e8c14e3bcba13bdd74197c22b1e5c714aabd0b7458af85db602f96b41f8323146c877c5a0e8817da93b16ed474000b1f24db0e4c872c3462b6ed920b22e9c0bd3d11ed7e43d207595aa74e593858faa7d931bede1e4a71bc2e469300ea01bc473a94578573332454f1c3e90062241d9907530ebd019f5a5a27ae5b24f0ecabda78bbdfb4f91af4c7aa2afe3e0c379b5e8f9c3a4b6f42b08d997fb71025a067480ae82d176006898ca5064cee7b51070b089ef1dc2beb6e93a3fc555b28da0a7c7452cac895913f4d43a64f1cb406078795641a4f5ce2afa01b880605fa581ec31
bd0e3ca6392765feda52e8449ee5a2237c15ed0a9cc01d8543e68b7c5b4839352b9dc2a04d2afa19c496f7ff3ad28f18ab2b4e27d354f1cd7103c8d21344583bbb72cf78468af925a8a68649e37a6fefa942fb0f6478c5a9344b40dc2a86b1694ca29beca6d8c865df50926f0474dcdb1856260103305cd579ef4e1773e856c257457b0e93166a3584c96d9c21cd767001d678b5d3d87a73d285157931d42aa4d6e205f983578a84dd7cbf50dcac50f7a20cc577b1b76452de74d5479c362cef2e290590cd153b36ac1327c7ea2f3e68373363da3fa8de597262917981d5696d092cf62f44b973a33556138f2804c704dd15a9185327a9a3025c2b9bdd23af1eb9cdfaed2ebe865f8bea8d1055b0c69e4ab848ba0a9f2d13395122351e65d30c52a6a8fa1f94cf4210468f1814ad4625ac7696ac13a08914123fefc48921fbdaf700c6203d153a1cfcd9e83e6176959a0bbfe8735040e703e0a72ce0449d6ba70c27002b3d959e1c58dbbb63522a26913633aae332606faea8d8a970c384d268effb12e015c1079d6c3c1a6d74e43301930f8a0607f0cc6d2ff00b65cf53143fe7c67660375082b68abfcc557ae3c67755bbaa78bdc982f95efe2feeece78022167af00398264c0fc6c3085ad746b4c00b1c0bea37e8d8209fdd22c7e3aea3e7adb08b970c84f8fb970369b4bb5b66df9df5fc1025e95ced89e8a52259e6ac52c1d8653ac89fa14c52e53cee15920ba8e25e83ef1836971d5afcece817ca3175201c2f4923a3697e770739d8468e155cfb706967b108ce0f0866f81db8bb758c515800efc00775c379040b2f24019b511b9bf5944a3716f39d5d398ebf4da853a94765229bb63fc255c8ebcac5e7c0e8bcfa329ceb5b1be203e45717ab8b5f512ef8d4f80d2c7933449fd8e342529c7d943e23e635078f79a1340e2db2cd20ee736288", 0x1000}], 0x1) socket(0x15, 0x4, 0xf56) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000100)='./cgroup.cpu/syz0\x00', 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f00000002c0)='cpuacct.usage_sys\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000380), 0x101bf) r3 = socket$nl_generic(0x10, 0x3, 0x10) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r3, 0x8933, &(0x7f0000000880)={'wlan0\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r3, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r4, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r5}, @void}}}, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_EXTERNAL_AUTH(r3, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)=ANY=[@ANYBLOB='4\x00\x00', @ANYRES16=0x0, @ANYBLOB="01002abd7000fddbdf257f00000008000300", @ANYRES32=0x0, @ANYBLOB="0c009900000000005c0000000a0034000202020202020000"], 0x34}, 0x1, 0x0, 0x0, 0x55cdcbd2fecf7229}, 0x55) (async) r6 = socket(0x26, 0x6, 0x9) sendmsg$NL80211_CMD_SET_INTERFACE(r6, &(0x7f00000001c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x8000000}, 0xc, 0x0, 0x1, 0x0, 0x0, 0x200c0}, 0x90) 19:48:44 executing program 3: r0 = epoll_create1(0x0) epoll_pwait(r0, &(0x7f0000000080)=[{}], 0x1, 0x96, 0x0, 0x0) [ 2730.111422][T28997] bond1285: (slave bridge1214): making interface the new active one [ 2730.124627][T28997] bridge1214: entered promiscuous mode [ 2730.141771][T28997] bond1285: (slave bridge1214): Enslaving as an active interface with an up link 19:48:44 executing program 3: r0 = epoll_create1(0x0) epoll_pwait(r0, &(0x7f0000000080)=[{}], 0x1, 0x96, 0x0, 0x0) 19:48:44 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", 
@ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x96e70500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2730.252132][T29005] bond1166: entered promiscuous mode [ 2730.316633][T29005] 8021q: adding VLAN 0 to HW filter on device bond1166 [ 2730.587944][T29006] bond1166: (slave bridge1095): making interface the new active one [ 2730.639860][T29006] bridge1095: entered promiscuous mode [ 2730.682164][T29006] bond1166: (slave bridge1095): Enslaving as an active interface with an up link 19:48:44 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x7}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:44 executing program 3: socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000001d80)) 19:48:44 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000380), 0x101bf) (async) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000600), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r0, 0x8933, &(0x7f0000000140)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_STOP_P2P_DEVICE(r1, &(0x7f0000000940)={0x0, 0x0, &(0x7f0000000900)={&(0x7f00000008c0)={0x1c, r2, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r3}, @void}}}, 0x1c}}, 0x0) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), r0) sendmsg$NL80211_CMD_DISCONNECT(r1, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x24, r4, 0x400, 0x70bd26, 0x25dfdbfd, {{}, {@void, @void}}, [@NL80211_ATTR_REASON_CODE={0x6, 0x36, 0x30}, @NL80211_ATTR_REASON_CODE={0x6, 0x36, 0x1a}]}, 0x24}, 0x1, 0x0, 0x0, 0x8040}, 0x20000000) 19:48:44 executing program 3: accept$unix(0xffffffffffffffff, &(0x7f0000002fc0), 0x0) 19:48:44 executing program 3: r0 = socket$key(0xf, 0x3, 0x2) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={0x0}, 0x10) sendmsg$key(r0, &(0x7f0000000000)={0x500, 0x0, &(0x7f0000000140)={&(0x7f0000000040)={0x2, 0xa, 0x0, 0x0, 0x2}, 0x10}}, 0x0) 19:48:45 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x91150600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 19:48:45 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000002c0)='cpuacct.usage_percpu_sys\x00', 0x275a, 0x0) r1 VM DIAGNOSIS: Warning: Permanently added '10.128.0.221' (ED25519) to the list of known hosts. lock-classes: 6263 [max: 8192] direct dependencies: 47492 [max: 131072] indirect dependencies: 1021335 all direct dependencies: 2432023 dependency chains: 255133 [max: 262144] dependency chain hlocks used: 1310715 [max: 1310720] dependency chain hlocks lost: 0 in-hardirq chains: 113 in-softirq chains: 3214 in-process chains: 251805 stack-trace entries: 322701 [max: 1048576] number of stack traces: 16325 number of stack hash chains: 10357 combined max dependencies:hardirq-safe locks: 61 hardirq-unsafe locks: 5507 softirq-safe locks: 363 softirq-unsafe locks: 5099 irq-safe locks: 372 irq-unsafe locks: 5507 hardirq-read-safe locks: 5 hardirq-read-unsafe locks: 210 softirq-read-safe locks: 23 softirq-read-unsafe locks: 191 irq-read-safe locks: 23 irq-read-unsafe locks: 210 uncategorized locks: 409 unused locks: 0 max locking depth: 19 max bfs queue depth: 692 max lock class index: 6262 debug_locks: 0 zapped classes: 1275 zapped lock chains: 2875 large chain blocks: 0 all lock classes: FD: 36 BD: 1 +.+.: fill_pool_map-wait-type-override ->&____s->seqcount ->pool_lock#2 ->&c->lock ->pool_lock ->&zone->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->rcu_node_0 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq ->init_task.mems_allowed_seq.seqcount FD: 2 BD: 4988 -.-.: &obj_hash[i].lock ->pool_lock FD: 1 BD: 4989 -.-.: pool_lock FD: 840 BD: 17 +.+.: cgroup_mutex ->pcpu_alloc_mutex ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&obj_hash[i].lock ->cgroup_file_kn_lock ->css_set_lock ->&c->lock ->&____s->seqcount ->blkcg_pol_mutex ->&n->list_lock ->&zone->lock ->percpu_counters_lock ->shrinker_rwsem ->&base->lock ->batched_entropy_u8.lock ->&pgdat->memcg_lru.lock ->devcgroup_mutex ->cpu_hotplug_lock ->fs_reclaim ->&rq->__lock ->cgroup_rstat_lock ->cpuset_mutex ->&dom->lock ->batched_entropy_u32.lock ->cgroup_idr_lock ->task_group_lock ->(wq_completion)cpuset_migrate_mm ->&wq->mutex ->&____s->seqcount#2 ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->cgroup_mutex.wait_lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4288 -.-.: (console_sem).lock FD: 225 BD: 13 +.+.: console_lock ->console_owner_lock ->resource_lock ->pool_lock#2 ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->&c->lock ->kbd_event_lock ->vga_lock ->(console_sem).lock ->fs_reclaim ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#11 ->&fb_info->lock ->&base->lock ->subsys mutex#5 ->&helper->lock ->&helper->damage_lock ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock ->vt_event_lock FD: 1 BD: 11 ....: console_srcu FD: 289 BD: 133 ++++: cpu_hotplug_lock ->jump_label_mutex ->static_call_mutex ->cpuhp_state_mutex ->wq_pool_mutex ->freezer_mutex 
->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) ->smpboot_threads_lock ->&obj_hash[i].lock ->&pool->lock ->&x->wait#4 ->&rq->__lock ->mem_hotplug_lock ->mem_hotplug_lock.waiters.lock ->mem_hotplug_lock.rss.gp_wait.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->rcu_node_0 ->&swhash->hlist_mutex ->pmus_lock ->pcp_batch_high_lock ->&xa->xa_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_attach_mutex ->pcpu_alloc_mutex ->relay_channels_mutex ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->text_mutex ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->(console_sem).lock ->rtc_lock ->sparse_irq_lock ->&x->wait#6 ->cpuhp_state-up ->cpu_hotplug_lock.waiters.lock ->stop_cpus_mutex ->&wq->mutex ->flush_lock ->&md->mutex ->&irq_desc_lock_class ->xps_map_mutex ->css_set_lock ->cpuset_mutex ->cgroup_threadgroup_rwsem ->cgroup_threadgroup_rwsem.waiters.lock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->jump_label_mutex.wait_lock ->&list->lock#5 ->&cfs_rq->removed.lock ->(work_completion)(flush) ->&x->wait#10 ->&rcu_state.expedited_wq FD: 59 BD: 140 +.+.: jump_label_mutex ->text_mutex ->&rq->__lock ->jump_label_mutex.wait_lock ->text_mutex.wait_lock ->&p->pi_lock ->&pool->lock ->rcu_node_0 FD: 58 BD: 134 +.+.: static_call_mutex ->text_mutex ->text_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 57 BD: 147 +.+.: text_mutex ->ptlock_ptr(page)#2 ->&rq->__lock ->text_mutex.wait_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&pool->lock ->pool_lock#2 ->&rcu_state.expedited_wq FD: 231 BD: 10 +.+.: console_mutex ->syslog_lock ->(console_sem).lock ->&port_lock_key ->console_lock ->console_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->console_srcu ->&root->kernfs_rwsem ->kernfs_notify_lock ->&rq->__lock FD: 27 BD: 11 +.+.: syslog_lock ->&rq->__lock FD: 1 BD: 4287 -.-.: console_owner_lock FD: 37 BD: 4286 -.-.: console_owner ->console_owner_lock ->&port_lock_key FD: 1 BD: 169 ..-.: input_pool.lock FD: 223 BD: 134 +.+.: cpuhp_state_mutex ->cpuhp_state-down ->cpuhp_state-up ->&p->pi_lock ->&x->wait#6 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->&zone->lock ->crypto_alg_sem ->scomp_lock FD: 291 BD: 1 +.+.: clocksource_mutex ->watchdog_lock ->cpu_hotplug_lock ->(console_sem).lock FD: 1 BD: 2 ....: watchdog_lock FD: 4 BD: 150 ++++: resource_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 ....: cache_disable_lock FD: 1 BD: 4271 +.+.: pgd_lock FD: 29 BD: 292 +.+.: init_mm.page_table_lock ->pgd_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 1 ....: early_pfn_lock FD: 177 BD: 1 +.+.: acpi_ioapic_lock ->ioapic_lock ->(console_sem).lock ->ioapic_mutex FD: 2 BD: 154 ....: ioapic_lock ->i8259A_lock FD: 1 BD: 1 +.+.: syscore_ops_lock FD: 1 BD: 1 ....: map_entries_lock FD: 1 BD: 7 ....: devtree_lock FD: 3 BD: 4296 ..-.: pcpu_lock ->stock_lock FD: 139 BD: 70 +.+.: param_lock ->rate_ctrl_mutex ->disk_events_mutex FD: 1 BD: 4970 ..-.: base_crng.lock FD: 1 BD: 1 ....: rcu_read_lock FD: 1 BD: 1 ....: crng_init_wait.lock FD: 2 BD: 1 ....: zonelist_update_seq ->zonelist_update_seq.seqcount FD: 1 BD: 2 ....: zonelist_update_seq.seqcount FD: 1 BD: 1 +.+.: dmar_global_lock FD: 2 BD: 4914 -.-.: &zone->lock ->&____s->seqcount FD: 1 BD: 4950 .-.-: &____s->seqcount FD: 3 BD: 4269 +.+.: &pcp->lock ->&zone->lock FD: 1 BD: 5012 -.-.: pool_lock#2 FD: 141 BD: 218 +.+.: pcpu_alloc_mutex ->pcpu_lock ->fs_reclaim ->pool_lock#2 
->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&c->lock ->&obj_hash[i].lock ->&rq->__lock ->pcpu_alloc_mutex.wait_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&pool->lock ->&n->list_lock FD: 6 BD: 4928 -.-.: &n->list_lock ->&c->lock FD: 5 BD: 4957 -.-.: &c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 166 BD: 84 +.+.: slab_mutex ->pool_lock#2 ->&c->lock ->&n->list_lock ->pcpu_alloc_mutex ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->fs_reclaim ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->lock ->&root->kernfs_rwsem ->&k->list_lock ->&____s->seqcount#2 FD: 3 BD: 5 ....: batched_entropy_u64.lock ->crngs.lock FD: 2 BD: 4969 ..-.: crngs.lock ->base_crng.lock FD: 4 BD: 1 ....: espfix_init_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4271 ..-.: percpu_counters_lock FD: 7 BD: 4231 +.+.: &mm->page_table_lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->stock_lock FD: 11 BD: 4235 +.+.: ptlock_ptr(page) ->lock#4 FD: 53 BD: 4255 +.+.: ptlock_ptr(page)#2 ->lock#4 ->ptlock_ptr(page)#2/1 ->key ->&____s->seqcount ->pool_lock#2 ->lock#5 ->&zone->lock ->&lruvec->lru_lock ->&obj_hash[i].lock ->&folio_wait_table[i] ->&mapping->private_lock ->&pgdat->kswapd_wait FD: 141 BD: 3 +.+.: trace_types_lock ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&obj_hash[i].lock FD: 1 BD: 1 ....: panic_notifier_list.lock FD: 1 BD: 1 ....: die_chain.lock FD: 143 BD: 4 +.+.: trace_event_sem ->trace_event_ida.xa_lock ->&rq->__lock ->trace_event_sem.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 3 BD: 3880 ..-.: batched_entropy_u32.lock ->crngs.lock FD: 26 BD: 4807 -.-.: &rq->__lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock/1 ->&cfs_rq->removed.lock ->&rt_b->rt_runtime_lock ->&cp->lock ->&rt_rq->rt_runtime_lock ->pool_lock#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4808 ....: &cfs_b->lock FD: 27 BD: 139 ....: init_task.pi_lock ->&rq->__lock FD: 1 BD: 1 ....: init_task.vtime_seqcount FD: 143 BD: 138 +.+.: wq_pool_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&wq->mutex ->&obj_hash[i].lock ->&pool->lock/1 ->fs_reclaim ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->wq_pool_attach_mutex ->(console_sem).lock ->&xa->xa_lock ->&n->list_lock ->&____s->seqcount#2 ->rcu_node_0 ->remove_cache_srcu ->&cfs_rq->removed.lock FD: 32 BD: 140 +.+.: &wq->mutex ->&pool->lock ->&pool->lock/1 ->&x->wait#10 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 4495 -.-.: &pool->lock ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->wq_mayday_lock FD: 31 BD: 4363 ..-.: &pool->lock/1 ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->wq_mayday_lock ->&x->wait#10 FD: 137 BD: 60 ++++: shrinker_rwsem ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount ->fs_reclaim ->&rq->__lock ->&obj_hash[i].lock ->rcu_node_0 ->&cfs_rq->removed.lock ->krc.lock ->f2fs_list_lock ->tk_core.seq.seqcount ->&sbi->s_es_lock ->&journal->j_list_lock FD: 1 BD: 4342 -.-.: rcu_node_0 FD: 11 BD: 76 -.-.: rcu_state.barrier_lock ->rcu_node_0 ->&obj_hash[i].lock FD: 30 BD: 3 ....: &rnp->exp_poll_lock FD: 9 BD: 5 ....: trace_event_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 
->&c->lock FD: 1 BD: 1 ....: trigger_cmd_mutex FD: 1 BD: 155 ....: i8259A_lock FD: 137 BD: 135 +.+.: irq_domain_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 32 BD: 291 +.+.: free_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->init_mm.page_table_lock ->quarantine_lock ->&base->lock ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 292 +.+.: vmap_area_lock FD: 7 BD: 151 -.-.: &irq_desc_lock_class ->i8259A_lock ->vector_lock ->ioapic_lock ->mask_lock ->tmp_mask_lock ->irq_resend_lock FD: 36 BD: 84 +.+.: vmap_purge_lock ->purge_vmap_area_lock ->free_vmap_area_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 16 BD: 96 +.+.: purge_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&base->lock ->&____s->seqcount FD: 2 BD: 83 +.+.: cpa_lock ->pgd_lock FD: 5 BD: 2 -.-.: timekeeper_lock ->tk_core.seq.seqcount ->pvclock_gtod_data FD: 4 BD: 4868 ----: tk_core.seq.seqcount ->&obj_hash[i].lock ->pvclock_gtod_data FD: 13 BD: 4888 -.-.: &base->lock ->&obj_hash[i].lock FD: 185 BD: 136 +.+.: pmus_lock ->pcpu_alloc_mutex ->pool_lock#2 ->&obj_hash[i].lock ->&cpuctx_mutex ->fs_reclaim ->&k->list_lock ->lock ->&root->kernfs_rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&x->wait#9 ->bus_type_sem ->&c->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#29 FD: 1 BD: 136 +.+.: &swhash->hlist_mutex FD: 1 BD: 137 +.+.: &cpuctx_mutex FD: 1 BD: 2 ....: tty_ldiscs_lock FD: 2 BD: 14 ....: kbd_event_lock ->led_lock FD: 1 BD: 15 ..-.: led_lock FD: 1 BD: 14 ....: vga_lock FD: 35 BD: 4289 -.-.: &port_lock_key ->&dev->power.lock ->&port->lock ->&tty->write_wait FD: 3 BD: 11 ....: console_srcu_srcu_usage.lock ->&obj_hash[i].lock FD: 1 BD: 41 ..-.: &ACCESS_PRIVATE(sdp, lock) FD: 42 BD: 3 +.+.: init_task.alloc_lock ->init_fs.lock FD: 36 BD: 1 +.+.: acpi_ioremap_lock ->pool_lock#2 ->resource_lock ->memtype_lock ->free_vmap_area_lock ->vmap_area_lock FD: 1 BD: 2 +.+.: memtype_lock FD: 1 BD: 17 ....: semaphore->lock FD: 1 BD: 13 ....: *(&acpi_gbl_reference_count_lock) FD: 9 BD: 1 ....: clockevents_lock ->tk_core.seq.seqcount ->tick_broadcast_lock ->i8253_lock FD: 3 BD: 2 -...: tick_broadcast_lock ->jiffies_lock FD: 1 BD: 2 ....: i8253_lock FD: 22 BD: 12 +.+.: &desc->request_mutex ->&irq_desc_lock_class ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 176 BD: 2 +.+.: ioapic_mutex ->&domain->mutex FD: 175 BD: 137 +.+.: &domain->mutex ->pool_lock#2 ->vector_lock ->&irq_desc_lock_class ->i8259A_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->sparse_irq_lock ->fs_reclaim FD: 1 BD: 154 -.-.: vector_lock FD: 1 BD: 1 +.+.: &pool->lock#2 FD: 2 BD: 3 -.-.: jiffies_lock ->jiffies_seq.seqcount FD: 1 BD: 4 -.-.: jiffies_seq.seqcount FD: 16 BD: 4835 -.-.: hrtimer_bases.lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 28 BD: 1 -.-.: log_wait.lock ->&p->pi_lock FD: 2 BD: 1 +.-.: drivers/char/random.c:1010 ->input_pool.lock FD: 290 BD: 2 +.+.: spec_ctrl_mutex ->cpu_hotplug_lock ->(console_sem).lock FD: 29 BD: 4326 +.+.: sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 38 BD: 2 +.+.: tomoyo_policy_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->&rq->__lock ->&n->list_lock ->remove_cache_srcu FD: 2 BD: 1 ....: aa_secids.xa_lock ->pool_lock#2 FD: 1 BD: 2 +.+.: aa_buffers_lock FD: 978 BD: 4 ++++: 
pernet_ops_rwsem ->stack_depot_init_mutex ->crngs.lock ->net_rwsem ->proc_inum_ida.xa_lock ->pool_lock#2 ->proc_subdir_lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->sysctl_lock ->pcpu_alloc_mutex ->&zone->lock ->net_generic_ids.xa_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->&obj_hash[i].lock ->k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->nl_table_lock ->nl_table_wait.lock ->rtnl_mutex ->uevent_sock_mutex ->&net->rules_mod_lock ->slab_mutex ->batched_entropy_u32.lock ->percpu_counters_lock ->pool_lock ->cache_list_lock ->tk_core.seq.seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&pool->lock/1 ->running_helpers_waitq.lock ->&sn->pipefs_sb_lock ->krc.lock ->&rq->__lock ->&s->s_inode_list_lock ->nf_hook_mutex ->cpu_hotplug_lock ->hwsim_netgroup_ida.xa_lock ->nf_connlabels_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->nf_ct_ecache_mutex ->nf_log_mutex ->ipvs->est_mutex ->&base->lock ->__ip_vs_app_mutex ->&hashinfo->lock#2 ->&net->ipv6.ip6addrlbl_table.lock ->(console_sem).lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->wq_pool_mutex ->pcpu_lock ->&list->lock#4 ->&dir->lock#2 ->ptype_lock ->k-clock-AF_TIPC ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&this->receive_lock ->once_lock ->nf_ct_proto_mutex ->k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->conn_lock ->&call->waitq ->&rx->call_lock ->&rxnet->call_lock ->&n->list_lock ->rdma_nets.xa_lock ->devices_rwsem ->rtnl_mutex.wait_lock ->&p->pi_lock ->uevent_sock_mutex.wait_lock ->rcu_node_0 ->remove_cache_srcu ->stock_lock ->&____s->seqcount#2 ->&net->nsid_lock ->ebt_mutex ->&xt[i].mutex ->&nft_net->commit_mutex ->netns_bpf_mutex ->&rnp->exp_wq[3] ->(&net->fs_probe_timer) ->&net->cells_lock ->(&net->cells_timer) ->bit_wait_table + i ->(&net->fs_timer) ->(wq_completion)kafsd ->&wq->mutex ->k-clock-AF_RXRPC ->&local->services_lock ->(wq_completion)krxrpcd ->rlock-AF_RXRPC ->&x->wait ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&ent->pde_unload_lock ->ovs_mutex ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->(work_completion)(&ovs_net->dp_notify_work) ->&srv->idr_lock ->&rnp->exp_lock ->&rnp->exp_wq[1] ->(work_completion)(&tn->work) ->&rnp->exp_wq[2] ->&tn->nametbl_lock ->&rnp->exp_wq[0] ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&c->work)->work) ->(wq_completion)krdsd ->(work_completion)(&rtn->rds_tcp_accept_w) ->rds_tcp_conn_lock ->loop_conns_lock ->(wq_completion)l2tp ->rcu_state.barrier_mutex ->(&rxnet->peer_keepalive_timer) ->(work_completion)(&rxnet->peer_keepalive_work) ->(&rxnet->service_conn_reap_timer) ->pcpu_alloc_mutex.wait_lock ->&cfs_rq->removed.lock ->dev_base_lock ->lweventlist_lock ->napi_hash_lock ->netdev_unregistering_wq.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_state.exp_mutex.wait_lock ->rcu_state.barrier_mutex.wait_lock ->&rcu_state.expedited_wq ->&fn->fou_lock ->ipvs->sync_mutex ->hwsim_radio_lock ->rdma_nets_rwsem ->k-clock-AF_NETLINK ->&nlk->wait ->wlock-AF_NETLINK ->&hn->hn_lock ->&pnettable->lock ->&pnetids_ndev->lock ->k-sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&sn->gssp_lock ->&cd->hash_lock ->(&net->can.stattimer) ->xfrm_state_gc_work ->&net->xfrm.xfrm_state_lock ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->ip6_fl_lock ->(&net->ipv6.ip6_fib_timer) ->__ip_vs_mutex ->(&ipvs->dest_trash_timer) ->(work_completion)(&(&ipvs->expire_nodest_conn_work)->work) ->(work_completion)(&(&ipvs->defense_work)->work) 
->(work_completion)(&(&ipvs->est_reload_work)->work) ->nfnl_subsys_ipset ->recent_lock ->hashlimit_mutex ->(work_completion)(&(&cnet->ecache.dwork)->work) ->sysfs_symlink_target_lock ->kernfs_idr_lock ->tcp_metrics_lock ->k-clock-AF_INET ->(work_completion)(&net->xfrm.policy_hash_work) ->&net->xfrm.xfrm_policy_lock ->(work_completion)(&net->xfrm.state_hash_work) ->&list->lock#2 ->&xa->xa_lock#3 ->genl_sk_destructing_waitq.lock ->quarantine_lock ->&meta->lock ->rcu_state.exp_mutex ->&x->wait#10 ->&sem->wait_lock ->pgd_lock ->key ->nf_conntrack_mutex ->vmap_area_lock ->purge_vmap_area_lock ->&device->compat_devs_mutex ->dev_pm_qos_sysfs_mtx ->subsys mutex#84 ->&x->wait#9 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->gdp_mutex ->&device->unregistration_lock FD: 27 BD: 73 +.+.: stack_depot_init_mutex ->&rq->__lock FD: 157 BD: 3648 ++++: net_rwsem ->&list->lock#2 ->&rq->__lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&cfs_rq->removed.lock ->&data->lock ->&c->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&table->lock#4 ->&ndev->lock FD: 12 BD: 93 ....: proc_inum_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&____s->seqcount#2 FD: 831 BD: 68 +.+.: rtnl_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->pcpu_alloc_mutex ->&xa->xa_lock#3 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->nl_table_lock ->nl_table_wait.lock ->net_rwsem ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->&rq->__lock ->krc.lock ->stack_depot_init_mutex ->cpu_hotplug_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_mutex ->crngs.lock ->&pool->lock/1 ->pool_lock ->&n->list_lock ->lweventlist_lock ->&pool->lock ->rtnl_mutex.wait_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&k->k_lock ->&cfs_rq->removed.lock ->param_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rdev->wiphy.mtx ->&base->lock ->subsys mutex#58 ->&sdata->sec_mtx ->&local->iflist_mtx#2 ->lock#7 ->failover_lock ->&tn->lock ->&idev->mc_lock ->&ndev->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&(&net->nexthop.notifier_chain)->rwsem ->reg_requests_lock ->reg_pending_beacons_lock ->rlock-AF_NETLINK ->(inetaddr_validator_chain).rwsem ->(inetaddr_chain).rwsem ->_xmit_LOOPBACK ->netpoll_srcu ->&in_dev->mc_tomb_lock ->&im->lock ->fib_info_lock ->cbs_list_lock ->(inet6addr_validator_chain).rwsem ->&net->ipv6.addrconf_hash_lock ->&ifa->lock ->&tb->tb6_lock ->&dev_addr_list_lock_key ->napi_hash_lock ->lapb_list_lock ->x25_neigh_list_lock ->_xmit_ETHER ->_xmit_SLIP ->remove_cache_srcu ->&vi->refill_lock ->noop_qdisc.q.lock ->&sem->wait_lock ->&rfkill->lock ->&local->chanctx_mtx ->&dev->tx_global_lock ->rcu_node_0 ->&rnp->exp_wq[3] ->&sch->q.lock ->class ->(&tbl->proxy_timer) ->_xmit_VOID ->_xmit_X25 ->&lapbeth->up_lock ->&lapb->lock ->&rnp->exp_wq[1] ->&ul->lock#2 ->&n->lock ->&dir->lock ->dev_addr_sem ->_xmit_IEEE802154 ->reg_indoor_lock ->&nr_netdev_addr_lock_key ->listen_lock ->uevent_sock_mutex.wait_lock ->&r->consumer_lock ->&mm->mmap_lock ->&meta->lock ->pcpu_lock ->(switchdev_blocking_notif_chain).rwsem ->&br->hash_lock ->nf_hook_mutex ->j1939_netdev_lock 
->&bat_priv->tvlv.handler_list_lock ->&bat_priv->tvlv.container_list_lock ->&bat_priv->softif_vlan_list_lock ->key#16 ->&bat_priv->tt.changes_list_lock ->kernfs_idr_lock ->&rnp->exp_wq[0] ->tk_core.seq.seqcount ->&wq->mutex ->init_lock ->&rnp->exp_wq[2] ->deferred_lock ->target_list_lock ->&br->lock ->&pn->hash_lock ->team->team_lock_key ->&rcu_state.expedited_wq ->&hard_iface->bat_iv.ogm_buff_mutex ->ptype_lock ->team->team_lock_key#2 ->_xmit_NONE ->lock#9 ->team->team_lock_key#3 ->&hsr->list_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->team->team_lock_key#4 ->team->team_lock_key#5 ->team->team_lock_key#6 ->mount_lock ->&xa->xa_lock#14 ->&dev_addr_list_lock_key#3/1 ->pgd_lock ->key ->percpu_counters_lock ->req_lock ->&x->wait#11 ->subsys mutex#82 ->bpf_devs_lock ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->&devlink_port->type_lock ->&vn->sock_lock ->devnet_rename_sem ->&nft_net->commit_mutex ->&ent->pde_unload_lock ->&wg->device_update_lock ->_xmit_SIT ->&bridge_netdev_addr_lock_key/1 ->_xmit_TUNNEL ->_xmit_IPGRE ->_xmit_TUNNEL6 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#2/1 ->_xmit_ETHER/1 ->&nn->netlink_tap_lock ->&batadv_netdev_addr_lock_key/1 ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&ipvlan->addrs_lock ->&macsec_netdev_addr_lock_key/1 ->key#21 ->&bat_priv->tt.commit_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET ->k-slock-AF_INET ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->pcpu_alloc_mutex.wait_lock ->&ul->lock ->&____s->seqcount#2 ->&lock->wait_lock ->qdisc_mod_lock ->&block->lock ->&block->cb_lock ->stock_lock ->&hwstats->hwsdev_list_lock ->&net->xdp.lock ->mirred_list_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->&pnn->pndevs.lock ->&pnn->routes.lock ->dev_pm_qos_sysfs_mtx ->deferred_probe_mutex ->device_links_lock ->__ip_vs_mutex ->flowtable_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->(&pmctx->ip6_mc_router_timer) ->(&pmctx->ip4_mc_router_timer) ->(work_completion)(&ht->run_work) ->&ht->mutex ->&br->multicast_lock ->rcu_state.exp_mutex.wait_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->wq_mayday_lock ->&batadv_netdev_addr_lock_key ->&bond->mode_lock ->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&bond->arp_work)->work) ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->ad_work)->work) ->&x->wait#10 ->(work_completion)(&(&bond->mcast_work)->work) ->(work_completion)(&(&bond->slave_arr_work)->work) ->&net->xfrm.xfrm_state_lock ->(work_completion)(&(&slave->notify_work)->work) ->sk_lock-AF_INET6 ->slock-AF_INET6 ->&tun->lock ->wlock-AF_UNSPEC ->elock-AF_UNSPEC ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->&caifn->caifdevs.lock ->&net->rules_mod_lock ->(&mrt->ipmr_expire_timer) ->nf_connlabels_lock ->&macvlan_netdev_addr_lock_key/2 ->(work_completion)(&port->bc_work) ->&p->alloc_lock ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->_xmit_NETROM#2 ->&this->info_list_lock ->sk_lock-AF_UNSPEC ->slock-AF_UNSPEC ->&vlan_netdev_addr_lock_key/2 ->&app->lock#2 ->&app->lock ->bpf_dispatcher_xdp.mutex ->(&brmctx->ip4_mc_router_timer) ->(&brmctx->ip4_other_query.timer) ->(&brmctx->ip4_own_query.timer) ->(&brmctx->ip6_mc_router_timer) ->(&brmctx->ip6_other_query.timer) ->(&brmctx->ip6_own_query.timer) ->(work_completion)(&br->mcast_gc_work) ->rcu_state.barrier_mutex ->&bridge_netdev_addr_lock_key ->(work_completion)(&(&br->gc_work)->work) ->(&br->hello_timer) 
->(&br->topology_change_timer) ->(&br->tcn_timer) ->&pmc->lock ->&data->lock ->(&app->join_timer) ->(&app->periodic_timer) ->&list->lock#11 ->(&app->join_timer)#2 ->&list->lock#12 ->&table->hash[i].lock ->k-clock-AF_INET ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&list->lock#2 ->&dev_addr_list_lock_key#2 ->team->team_lock_key#7 ->team->team_lock_key#8 ->team->team_lock_key#9 ->sk_lock-AF_CAN ->slock-AF_CAN ->act_mod_lock ->&tn->idrinfo->lock ->&p->tcfa_lock ->flow_indr_block_lock ->cls_mod_lock ->sk_lock-AF_INET ->slock-AF_INET ->&tn->idrinfo->lock#2 ->team->team_lock_key#10 ->team->team_lock_key#11 ->&chain->filter_chain_lock ->&block->proto_destroy_lock ->&dev_addr_list_lock_key/2 ->&xs->mutex ->acaddr_hash_lock ->&tp->lock ->&xa->xa_lock#16 ->&head->masks_lock ->&bridge_netdev_addr_lock_key/2 ->&dev_addr_list_lock_key#3/2 ->(work_completion)(&port->wq) ->&tn->idrinfo->lock#3 ->(&mp->timer) ->&pgdat->kswapd_wait ->&tn->idrinfo->lock#4 ->(work_completion)(&q->work) ->ife_mod_lock ->&tn->idrinfo->lock#5 ->&pn->all_ppp_mutex ->&ppp->rlock ->&ppp->wlock ->&dev_addr_list_lock_key#4 ->&pf->rwait ->&dev_addr_list_lock_key#2/4 ->hrtimer_bases.lock FD: 35 BD: 237 +.+.: lock ->kernfs_idr_lock ->cgroup_idr_lock ->pidmap_lock ->drm_minor_lock ->&file_private->table_lock ->&q->queue_lock ->sg_index_lock ->map_idr_lock ->prog_idr_lock ->btf_idr_lock ->&group->inotify_data.idr_lock ->link_idr_lock ->sctp_assocs_id_lock FD: 13 BD: 4254 +.+.: kernfs_idr_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 142 BD: 240 ++++: &root->kernfs_rwsem ->&root->kernfs_iattr_rwsem ->kernfs_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&zone->lock ->&____s->seqcount ->&cfs_rq->removed.lock ->quarantine_lock ->rcu_node_0 ->inode_hash_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&c->lock ->remove_cache_srcu ->&n->list_lock ->&sem->wait_lock ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 ->&meta->lock ->kfence_freelist_lock ->kernfs_rename_lock ->&xa->xa_lock#13 ->stock_lock ->&____s->seqcount#2 ->&p->pi_lock ->&base->lock ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 4 ++++: file_systems_lock FD: 138 BD: 246 ++++: &root->kernfs_iattr_rwsem ->&rq->__lock ->pgd_lock ->pool_lock#2 ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->iattr_mutex ->&sem->wait_lock ->tk_core.seq.seqcount ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 5 BD: 57 +.+.: sb_lock ->unnamed_dev_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 148 BD: 1 +.+.: &type->s_umount_key/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start FD: 1 BD: 40 +.+.: list_lrus_mutex FD: 1 BD: 58 ....: unnamed_dev_ida.xa_lock FD: 1 BD: 11 +.+.: &sbinfo->stat_lock FD: 55 BD: 4268 +.+.: &sb->s_type->i_lock_key ->&dentry->d_lock ->&xa->xa_lock#7 ->&dentry->d_lock/1 ->bit_wait_table + i FD: 1 BD: 4260 +.+.: &s->s_inode_list_lock FD: 39 BD: 4325 +.+.: &dentry->d_lock ->&wq ->&dentry->d_lock/1 ->&wq#2 ->&lru->node[i].lock ->sysctl_lock ->&wq#3 ->&dentry->d_lock/2 ->&p->pi_lock FD: 2 BD: 
29 ....: mnt_id_ida.xa_lock ->pool_lock#2 FD: 43 BD: 193 +.+.: mount_lock ->mount_lock.seqcount ->&dentry->d_lock ->&obj_hash[i].lock ->&____s->seqcount ->pool_lock#2 FD: 41 BD: 198 +.+.: mount_lock.seqcount ->&new_ns->poll ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 ->&p->pi_lock FD: 146 BD: 1 +.+.: &type->s_umount_key#2/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 4268 +.+.: &sb->s_type->i_lock_key#2 ->&dentry->d_lock FD: 1 BD: 2 ..-.: ucounts_lock FD: 41 BD: 197 +.+.: init_fs.lock ->init_fs.seq.seqcount ->&dentry->d_lock FD: 1 BD: 194 +.+.: init_fs.seq.seqcount FD: 146 BD: 1 +.+.: &type->s_umount_key#3/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 54 BD: 4271 +.+.: &sb->s_type->i_lock_key#3 ->&dentry->d_lock ->&xa->xa_lock#7 FD: 1 BD: 135 +.+.: cpuhp_state-down FD: 218 BD: 135 +.+.: cpuhp_state-up ->smpboot_threads_lock ->sparse_irq_lock ->&swhash->hlist_mutex ->pmus_lock ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->wq_pool_mutex ->rcu_node_0 ->&rq->__lock ->jump_label_mutex ->fs_reclaim ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#24 ->subsys mutex#25 ->&cfs_rq->removed.lock ->&k->k_lock ->subsys mutex#79 ->&base->lock ->swap_slots_cache_mutex FD: 1 BD: 96 ++++: proc_subdir_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#4/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&sb->s_type->i_lock_key#4 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#4 ->&dentry->d_lock FD: 31 BD: 142 ....: cgroup_file_kn_lock ->kernfs_notify_lock FD: 33 BD: 141 ..-.: css_set_lock ->cgroup_file_kn_lock ->&p->pi_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 9 BD: 238 +...: cgroup_idr_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 155 BD: 136 +.+.: cpuset_mutex ->callback_lock ->jump_label_mutex ->&p->pi_lock ->&p->alloc_lock ->cpuset_attach_wq.lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->css_set_lock FD: 1 BD: 137 ....: callback_lock FD: 142 BD: 18 +.+.: blkcg_pol_mutex ->pcpu_alloc_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock FD: 3 BD: 4959 -.-.: batched_entropy_u8.lock ->crngs.lock FD: 1 BD: 4248 ....: &pgdat->memcg_lru.lock FD: 1 BD: 18 +.+.: devcgroup_mutex FD: 48 BD: 136 +.+.: freezer_mutex ->freezer_lock ->&rq->__lock ->freezer_mutex.wait_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 47 BD: 185 +.+.: rcu_state.exp_mutex ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&obj_hash[i].lock ->&pool->lock ->&rnp->exp_wq[2] ->&rq->__lock ->&rnp->exp_wq[3] ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_state.exp_mutex.wait_lock ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock FD: 37 BD: 224 +.+.: rcu_state.exp_wake_mutex ->rcu_node_0 ->&rnp->exp_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&rnp->exp_wq[2] 
->&rnp->exp_wq[3] ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_state.exp_wake_mutex.wait_lock ->&obj_hash[i].lock ->pool_lock#2 ->&pool->lock FD: 1 BD: 234 +.+.: &rnp->exp_lock FD: 28 BD: 237 ....: &rnp->exp_wq[0] ->&p->pi_lock FD: 28 BD: 238 ....: &rnp->exp_wq[1] ->&p->pi_lock FD: 1 BD: 139 ....: init_sighand.siglock FD: 1 BD: 3 +.+.: init_files.file_lock FD: 13 BD: 255 ....: pidmap_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 184 BD: 135 ++++: cgroup_threadgroup_rwsem ->css_set_lock ->&p->pi_lock ->tk_core.seq.seqcount ->tasklist_lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&sighand->siglock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->inode_hash_lock ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#30 ->&root->kernfs_iattr_rwsem ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&c->lock ->cpuset_mutex ->&p->alloc_lock ->freezer_mutex ->&____s->seqcount#2 ->&____s->seqcount ->freezer_mutex.wait_lock ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_state.exp_mutex.wait_lock ->cgroup_threadgroup_rwsem.waiters.lock FD: 27 BD: 4731 -.-.: &p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 68 BD: 138 .+.+: tasklist_lock ->init_task.pi_lock ->init_sighand.siglock ->&p->pi_lock ->&sighand->siglock ->&pid->wait_pidfd ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&p->alloc_lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock FD: 1 BD: 4808 -.-.: &per_cpu_ptr(group->pcpu, cpu)->seq FD: 1 BD: 1 ....: (kthreadd_done).wait.lock FD: 42 BD: 153 ..-.: &sighand->siglock ->&sig->wait_chldexit ->input_pool.lock ->&(&sig->stats_lock)->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->hrtimer_bases.lock ->&p->pi_lock ->&obj_hash[i].lock ->&sighand->signalfd_wqh ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&tty->ctrl.lock ->&prev->lock ->&rq->__lock ->stock_lock ->&n->list_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock ->quarantine_lock FD: 49 BD: 195 +.+.: &p->alloc_lock ->&____s->seqcount#2 ->init_fs.lock ->&fs->lock ->&x->wait ->&memcg->mm_list.lock ->&x->wait#25 ->&newf->file_lock ->&p->pi_lock FD: 1 BD: 4932 .-.-: &____s->seqcount#2 FD: 136 BD: 4247 +.+.: fs_reclaim ->mmu_notifier_invalidate_range_start ->&mapping->i_mmap_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&zone->lock ->&pgdat->kcompactd_wait ->lock#4 ->lock#5 ->batched_entropy_u8.lock ->&pgdat->memcg_lru.lock ->&lruvec->lru_lock ->&mapping->private_lock ->stock_lock ->&sb->s_type->i_lock_key#3 ->swap_slots_cache_mutex ->&cache->alloc_lock ->shmem_swaplist_mutex ->&p->lock#2 ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&tree->lock ->&xa->xa_lock#21 ->remove_cache_srcu ->&sb->s_type->i_lock_key#22 ->&vmpr->sr_lock ->&base->lock ->quarantine_lock ->&sem->wait_lock ->&p->pi_lock ->&folio_wait_table[i] ->&memcg->mm_list.lock FD: 36 BD: 4261 +.+.: mmu_notifier_invalidate_range_start ->dma_fence_map ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->pool_lock#2 ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 165 +.+.: kthread_create_lock FD: 28 BD: 226 ....: &x->wait ->&p->pi_lock FD: 34 BD: 140 +.+.: 
wq_pool_attach_mutex ->&p->pi_lock ->&x->wait#7 ->&pool->lock ->&pool->lock/1 ->&rq->__lock ->wq_pool_attach_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 28 BD: 4513 ..-.: wq_mayday_lock ->&p->pi_lock FD: 7 BD: 139 ....: &xa->xa_lock ->&c->lock ->pool_lock#2 FD: 32 BD: 1 +.-.: (&pool->mayday_timer) ->&pool->lock/1 ->&obj_hash[i].lock ->&base->lock ->&pool->lock FD: 57 BD: 1 +.+.: (wq_completion)rcu_gp ->(work_completion)(&rnp->exp_poll_wq) ->(work_completion)(&(&ssp->srcu_sup->work)->work) ->(work_completion)(&sdp->work) ->(work_completion)(&rew->rew_work) ->&rq->__lock FD: 31 BD: 2 +.+.: (work_completion)(&rnp->exp_poll_wq) ->&rnp->exp_poll_lock FD: 14 BD: 1 +.-.: (&wq_watchdog_timer) ->&obj_hash[i].lock ->&base->lock FD: 925 BD: 1 +.+.: (wq_completion)events_unbound ->(work_completion)(&(&kfence_timer)->work) ->(work_completion)(&entry->work) ->(next_reseed).work ->(work_completion)(&sub_info->work) ->(stats_flush_dwork).work ->deferred_probe_work ->(work_completion)(&map->work) ->(work_completion)(&barr->work) ->connector_reaper_work ->(reaper_work).work ->&rq->__lock ->(work_completion)(&port->bc_work) ->(work_completion)(&pool->idle_cull_work) FD: 291 BD: 2 +.+.: (work_completion)(&(&kfence_timer)->work) ->cpu_hotplug_lock ->allocation_wait.lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 28 BD: 3 -.-.: allocation_wait.lock ->&p->pi_lock FD: 1 BD: 4954 -.-.: kfence_freelist_lock FD: 1 BD: 4294 ..-.: &meta->lock FD: 5 BD: 2 ....: rcu_tasks.cbs_gbl_lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 3 ....: rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 3 BD: 137 ....: &ACCESS_PRIVATE(rtpcp, lock) ->&obj_hash[i].lock FD: 5 BD: 2 ....: rcu_tasks_trace.cbs_gbl_lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 135 ....: rcu_tasks_trace__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 57 BD: 1 +.+.: rcu_tasks.tasks_gp_mutex ->rcu_tasks.cbs_gbl_lock ->&rq->__lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock ->&base->lock ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->tasks_rcu_exit_srcu ->&x->wait#3 ->kernel/rcu/tasks.h:147 ->(&timer.timer) ->&x->wait#2 ->(console_sem).lock ->console_owner_lock ->console_owner FD: 28 BD: 3 ....: &x->wait#2 ->&p->pi_lock FD: 28 BD: 239 ....: &rnp->exp_wq[2] ->&p->pi_lock FD: 31 BD: 8 ....: tasks_rcu_exit_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 2 ....: tasks_rcu_exit_srcu FD: 39 BD: 4 +.+.: (work_completion)(&(&ssp->srcu_sup->work)->work) ->&ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&rq->__lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->tracepoint_srcu_srcu_usage.lock ->&cfs_rq->removed.lock ->rcu_node_0 ->pool_lock#2 FD: 38 BD: 5 +.+.: &ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->tracepoint_srcu_srcu_usage.lock ->rcu_node_0 ->pool_lock#2 FD: 28 BD: 23 ....: &x->wait#3 ->&p->pi_lock FD: 293 BD: 1 +.+.: rcu_tasks_trace.tasks_gp_mutex ->rcu_tasks_trace.cbs_gbl_lock ->&rq->__lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->cpu_hotplug_lock 
->&x->wait#2 ->&obj_hash[i].lock ->&base->lock ->(&timer.timer) ->(console_sem).lock FD: 5 BD: 1 -.-.: (null) ->tk_core.seq.seqcount FD: 30 BD: 3 ..-.: &(&ssp->srcu_sup->work)->timer FD: 36 BD: 6 +.+.: &ssp->srcu_sup->srcu_cb_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 33 BD: 4 +.+.: (work_completion)(&sdp->work) ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&base->lock ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 2 ....: kernel/rcu/tasks.h:147 FD: 32 BD: 1 ..-.: &(&kfence_timer)->timer FD: 28 BD: 213 +.-.: (&timer.timer) ->&p->pi_lock FD: 28 BD: 237 ....: &rnp->exp_wq[3] ->&p->pi_lock FD: 1 BD: 1 ....: &nmi_desc[0].lock FD: 139 BD: 136 +.+.: smpboot_threads_lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 28 BD: 4175 ..-.: &rcu_state.gp_wq ->&p->pi_lock FD: 27 BD: 325 -.-.: &stop_pi_lock ->&rq->__lock FD: 1 BD: 325 -.-.: &stopper->lock FD: 1 BD: 2 +.+.: (module_notify_list).rwsem FD: 1 BD: 1 +.+.: ddebug_lock FD: 1 BD: 1 ....: rcu_callback FD: 1 BD: 1 .+.+: &pmus_srcu FD: 290 BD: 1 +.+.: watchdog_mutex ->cpu_hotplug_lock FD: 28 BD: 135 ....: &x->wait#4 ->&p->pi_lock FD: 993 BD: 1 +.+.: (wq_completion)events ->(work_completion)(&sscs.work) ->pcpu_balance_work ->(work_completion)(&pwq->unbound_release_work) ->(shepherd).work ->(work_completion)(&rfkill_global_led_trigger_work) ->timer_update_work ->(work_completion)(&p->wq) ->(work_completion)(&(&group->avgs_work)->work) ->(work_completion)(&(&krcp->monitor_work)->work) ->(work_completion)(&helper->damage_work) ->(work_completion)(&rfkill->sync_work) ->(linkwatch_work).work ->(work_completion)(&w->work) ->(work_completion)(&vi->config_work) ->(work_completion)(&blkg->free_work) ->(work_completion)(&gadget->work) ->kernfs_notify_work ->async_lookup_work ->autoload_work ->(work_completion)(&barr->work) ->(debug_obj_work).work ->drain_vmap_work ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->netstamp_work ->reg_work ->(work_completion)(&fw_work->work) ->(delayed_fput_work).work ->(work_completion)(&s->destroy_work) ->(work_completion)(&aux->work) ->(work_completion)(&ht->run_work) ->(work_completion)(&w->w) ->(work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->(deferred_probe_timeout_work).work ->(work_completion)(&w->work)#2 ->(regulator_init_complete_work).work ->(work_completion)(&cgrp->bpf.release_work) ->deferred_process_work ->(work_completion)(&data->fib_event_work) ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->(work_completion)(&(&conn->info_timer)->work) ->(work_completion)(&rdev->wiphy_work) ->wireless_nlevent_work ->free_ipc_work ->fqdir_free_work ->(work_completion)(&msk->work) ->&rq->__lock ->(work_completion)(&(&cnet->ecache.dwork)->work) ->(work_completion)(&nlk->work) ->(work_completion)(&aux->work)#2 ->(work_completion)(&data->dm_alert_work) ->(ima_keys_delayed_work).work ->((tcp_md5_needed).work).work ->(work_completion)(&work->work)#2 ->(work_completion)(&umem->work) ->xfrm_state_gc_work ->(work_completion)(&rdev->conn_work) ->(work_completion)(&smcibdev->port_event_work) 
->(work_completion)(&(&krcp->page_cache_work)->work) ->(work_completion)(&(&sw_ctx_tx->tx_work.work)->work) ->rcu_node_0 FD: 30 BD: 2 +.+.: (work_completion)(&sscs.work) ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->&x->wait#4 FD: 1 BD: 137 -.-.: &x->wait#5 FD: 2 BD: 196 +.+.: &newf->file_lock ->&newf->resize_wait FD: 1 BD: 1 ....: &p->vtime.seqcount FD: 39 BD: 134 +.+.: mem_hotplug_lock ->mem_hotplug_lock.rss.gp_wait.lock FD: 3 BD: 135 ..-.: mem_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 134 ....: mem_hotplug_lock.waiters.lock FD: 292 BD: 1 +.+.: cpu_add_remove_lock ->cpu_hotplug_lock ->cpu_hotplug_lock.waiters.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->spec_ctrl_mutex ->cpuset_hotplug_work ->&rq->__lock FD: 3 BD: 134 ..-.: cpu_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 28 BD: 134 ....: cpu_hotplug_lock.waiters.lock ->&p->pi_lock FD: 27 BD: 8 +.+.: cpuset_hotplug_work ->&rq->__lock FD: 1 BD: 135 +.+.: pcp_batch_high_lock FD: 1 BD: 134 +.+.: relay_channels_mutex FD: 1 BD: 142 ....: rtc_lock FD: 174 BD: 140 +.+.: sparse_irq_lock ->tk_core.seq.seqcount ->rtc_lock ->&x->wait#6 ->&rq->__lock ->&p->pi_lock ->&irq_desc_lock_class ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock FD: 28 BD: 141 ....: &x->wait#6 ->&p->pi_lock FD: 1 BD: 4808 ....: &rq->__lock/1 FD: 1 BD: 4809 -.-.: &cfs_rq->removed.lock FD: 1 BD: 141 ....: &x->wait#7 FD: 18 BD: 4808 -...: &rt_b->rt_runtime_lock ->&rt_rq->rt_runtime_lock ->tk_core.seq.seqcount ->hrtimer_bases.lock FD: 1 BD: 4809 -...: &rt_rq->rt_runtime_lock FD: 31 BD: 134 +.+.: stop_cpus_mutex ->&stopper->lock ->&stop_pi_lock ->&rq->__lock ->&x->wait#8 FD: 28 BD: 136 ....: &x->wait#8 ->&p->pi_lock FD: 142 BD: 1 +.+.: sched_domains_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->pcpu_alloc_mutex ->&zone->lock ->&____s->seqcount ->&c->lock ->pcpu_lock FD: 1 BD: 4808 ....: &cp->lock FD: 1 BD: 1 +.+.: (memory_chain).rwsem FD: 148 BD: 1 +.+.: &type->s_umount_key#5/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock FD: 54 BD: 4268 +.+.: &sb->s_type->i_lock_key#5 ->&dentry->d_lock ->&xa->xa_lock#7 FD: 28 BD: 1 ....: (setup_done).wait.lock ->&p->pi_lock FD: 149 BD: 26 ++++: namespace_sem ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->rename_lock ->&obj_hash[i].lock ->&new_ns->ns_lock ->&rq->__lock ->&____s->seqcount#2 ->stock_lock ->remove_cache_srcu ->rcu_node_0 ->&n->list_lock ->namespace_sem.wait_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->kfence_freelist_lock ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 187 +.+.: &____s->seqcount#3 FD: 138 BD: 1 +.+.: &type->s_umount_key#6 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&lru->node[i].lock ->&sbinfo->stat_lock ->&obj_hash[i].lock FD: 28 BD: 4327 +.+.: &lru->node[i].lock FD: 142 BD: 8 ++++: &sb->s_type->i_mutex_key ->namespace_sem ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#2 ->&wb->list_lock ->&c->lock ->&zone->lock ->&rq->__lock ->&____s->seqcount 
->&obj_hash[i].lock ->&cfs_rq->removed.lock ->&dentry->d_lock/1 ->rcu_node_0 FD: 41 BD: 28 +.+.: rename_lock ->rename_lock.seqcount FD: 40 BD: 218 +.+.: rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/2 FD: 1 BD: 199 ....: &new_ns->poll FD: 2 BD: 4328 +.+.: &____s->seqcount#4 ->&____s->seqcount#4/1 FD: 41 BD: 196 +.+.: &fs->lock ->&____s->seqcount#3 ->&dentry->d_lock FD: 1 BD: 169 +.+.: req_lock FD: 160 BD: 1 +.+.: of_mutex ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem FD: 1 BD: 198 ....: &x->wait#9 FD: 1 BD: 224 +.+.: &k->list_lock FD: 27 BD: 204 ++++: bus_type_sem ->&rq->__lock FD: 32 BD: 4300 -...: &dev->power.lock ->&dev->power.lock/1 ->&dev->power.wait_queue ->hrtimer_bases.lock FD: 39 BD: 200 +.+.: dpm_list_mtx ->&rq->__lock ->(console_sem).lock FD: 146 BD: 211 +.+.: uevent_sock_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->rlock-AF_NETLINK ->rcu_node_0 ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->uevent_sock_mutex.wait_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->mmu_notifier_invalidate_range_start ->quarantine_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->&data->lock ->stock_lock FD: 1 BD: 200 ....: running_helpers_waitq.lock FD: 1 BD: 215 +.+.: sysfs_symlink_target_lock FD: 2 BD: 273 +.+.: &k->k_lock ->klist_remove_lock FD: 1 BD: 1 ....: &dev->mutex FD: 1 BD: 1 +.+.: subsys mutex FD: 2 BD: 1 +.+.: memory_blocks.xa_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#2 FD: 139 BD: 12 +.+.: register_lock ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 2 +.+.: (pm_chain_head).rwsem FD: 1 BD: 1 +.+.: cpufreq_governor_mutex FD: 42 BD: 2 +.+.: (work_completion)(&rew->rew_work) ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->pool_lock#2 ->pool_lock ->&cfs_rq->removed.lock ->rcu_state.exp_wake_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 1 +.+.: dyn_event_ops_mutex FD: 1 BD: 2 ++++: binfmt_lock FD: 1 BD: 108 +.+.: pin_fs_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#7/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#6 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 3 +.+.: &sb->s_type->i_lock_key#6 ->&dentry->d_lock FD: 139 BD: 1 +.+.: &sb->s_type->i_mutex_key#2 ->&sb->s_type->i_lock_key#6 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock FD: 28 BD: 4326 ....: &wq ->&p->pi_lock FD: 1 BD: 36 +.+.: chrdevs_lock FD: 892 BD: 1 ++++: cb_lock ->genl_mutex ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->&c->lock ->rtnl_mutex ->&obj_hash[i].lock ->&rdev->wiphy.mtx ->nlk_cb_mutex-GENERIC ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&lock->wait_lock ->genl_mutex.wait_lock ->&____s->seqcount#2 ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&devlink->lock_key#6 ->quarantine_lock ->remove_cache_srcu ->&devlink->lock_key#4 ->&dir->lock#2 ->(console_sem).lock ->&devlink->lock_key#2 
->batched_entropy_u8.lock ->kfence_freelist_lock ->pcpu_alloc_mutex ->pcpu_alloc_mutex.wait_lock ->crngs.lock ->ovs_mutex ->ovs_mutex.wait_lock ->nl_table_lock ->nl_table_wait.lock ->pcpu_lock ->&data->lock ->&base->lock FD: 876 BD: 3 +.+.: genl_mutex ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&n->list_lock ->rtnl_mutex ->&rq->__lock ->&zone->lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->hwsim_radio_lock ->&x->wait#9 ->batched_entropy_u32.lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#56 ->device_links_lock ->&k->k_lock ->deferred_probe_mutex ->cpu_hotplug_lock ->wq_pool_mutex ->crngs.lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(inetaddr_chain).rwsem ->inet6addr_chain.lock ->&____s->seqcount#2 ->genl_mutex.wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&base->lock ->smcd_dev_list.mutex ->key#16 ->quarantine_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->key#25 ->remove_cache_srcu ->&bat_priv->tp_list_lock ->kthread_create_lock ->&x->wait ->net_dm_mutex ->&data->lock ->&cfs_rq->removed.lock ->__ip_vs_mutex ->&net->smc.mutex_fback_rsn ->&sem->wait_lock ->uevent_sock_mutex.wait_lock ->&rcu_state.expedited_wq ->&ht->lock ->(&timer.timer) ->nbd_index_mutex ->&nbd->config_lock ->&pernet->lock ->&pn->l2tp_tunnel_idr_lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->l2tp_ip6_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&table->hash[i].lock ->k-clock-AF_INET ->sk_lock-AF_TIPC ->slock-AF_TIPC ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->sk_lock-AF_NETLINK ->slock-AF_NETLINK FD: 147 BD: 1 +.+.: &type->s_umount_key#8/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 101 +.+.: &sb->s_type->i_lock_key#7 ->&dentry->d_lock FD: 142 BD: 99 +.+.: &sb->s_type->i_mutex_key#3 ->&sb->s_type->i_lock_key#7 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->(console_sem).lock ->rcu_node_0 ->remove_cache_srcu ->pin_fs_lock ->mount_lock ->&fsnotify_mark_srcu ->&xa->xa_lock#7 ->&____s->seqcount#2 ->&xa->xa_lock#13 ->stock_lock FD: 1 BD: 4 +.+.: subsys mutex#3 FD: 4 BD: 6 ....: async_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 399 BD: 2 +.+.: (work_completion)(&entry->work) ->tk_core.seq.seqcount ->&dev->power.lock ->&k->list_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->async_lock ->async_done.lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->sb_writers#2 ->&pool->lock/1 ->cpu_hotplug_lock ->wq_pool_mutex ->&n->list_lock ->pcpu_alloc_mutex ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start 
->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->queue_lock ->&rq->__lock ->major_names_lock ->floppy_lock ->rtc_lock ->&wq->mutex ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock ->resource_lock ->&base->lock ->(&timer.timer) ->command_done.lock ->&shost->scan_mutex ->(console_sem).lock ->console_owner_lock ->console_owner ->async_scan_lock ->&cfs_rq->removed.lock ->&q->debugfs_mutex ->klist_remove_lock ->kernfs_idr_lock ->(&motor_off_timer[drive]) ->&xa->xa_lock#8 ->&q->unused_hctx_lock ->(&sq->pending_timer) ->(work_completion)(&td->dispatch_work) ->&q->blkcg_mutex ->pcpu_lock ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 22 .+.+: device_links_srcu FD: 1 BD: 3 +.+.: regulator_list_mutex FD: 3 BD: 21 +.+.: fwnode_link_lock ->&k->k_lock FD: 30 BD: 91 +.+.: device_links_lock ->&k->list_lock ->&k->k_lock ->&rq->__lock FD: 1 BD: 4 ....: &dev->devres_lock FD: 1 BD: 4 +.+.: regulator_nesting_mutex FD: 2 BD: 1 +.+.: regulator_ww_class_mutex ->regulator_nesting_mutex FD: 163 BD: 182 +.+.: gdp_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->kobj_ns_type_lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->&rq->__lock ->&____s->seqcount#2 ->kernfs_idr_lock ->&sem->wait_lock ->&p->pi_lock FD: 3 BD: 3 +.+.: subsys mutex#4 ->&k->k_lock FD: 27 BD: 91 +.+.: deferred_probe_mutex ->&rq->__lock FD: 1 BD: 20 ....: probe_waitqueue.lock FD: 1 BD: 3 ....: async_done.lock FD: 146 BD: 1 +.+.: &type->s_umount_key#9/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 4269 +.+.: &sb->s_type->i_lock_key#8 ->&dentry->d_lock ->bit_wait_table + i FD: 146 BD: 82 +.+.: pack_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->vmap_purge_lock ->cpa_lock ->text_mutex ->pack_mutex.wait_lock ->&pool->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->text_mutex.wait_lock ->&p->pi_lock FD: 28 BD: 4284 -.-.: &rcu_state.expedited_wq ->&p->pi_lock FD: 28 BD: 81 +.+.: &fp->aux->used_maps_mutex ->&rq->__lock ->&map->owner.lock FD: 1 BD: 1 +.+.: proto_list_mutex FD: 1 BD: 1 +.+.: targets_mutex FD: 29 BD: 3909 ...-: nl_table_lock ->pool_lock#2 ->nl_table_wait.lock ->&obj_hash[i].lock ->&c->lock FD: 28 BD: 3910 ..-.: nl_table_wait.lock ->&p->pi_lock FD: 1 BD: 1 +.+.: net_family_lock FD: 2 BD: 5 ....: net_generic_ids.xa_lock ->pool_lock#2 FD: 6 BD: 3693 ..-.: &dir->lock ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount FD: 36 BD: 5 +.+.: k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->rcu_node_0 ->&rq->__lock FD: 1 BD: 6 +...: k-slock-AF_NETLINK FD: 31 BD: 3688 ..-.: rhashtable_bucket ->rhashtable_bucket/1 FD: 43 BD: 137 ....: freezer_lock ->&sighand->siglock FD: 1 BD: 1 ....: audit_backlog_wait.lock FD: 28 BD: 10 ....: kauditd_wait.lock ->&p->pi_lock FD: 1 BD: 10 ....: &list->lock FD: 1 BD: 1 ....: printk_ratelimit_state.lock FD: 3 BD: 2 +.+.: lock#2 ->&zone->lock FD: 141 BD: 1 +.+.: khugepaged_mutex 
->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->lock#2 ->pcp_batch_high_lock FD: 3 BD: 14 +.+.: subsys mutex#5 ->&k->k_lock FD: 4 BD: 1 +.+.: subsys mutex#6 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 +.+.: regmap_debugfs_early_lock FD: 1 BD: 1 +.+.: (acpi_reconfig_chain).rwsem FD: 1 BD: 1 +.+.: __i2c_board_lock FD: 140 BD: 1 +.+.: core_lock ->&k->list_lock ->&k->k_lock ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 1 +.+.: thermal_governor_lock ->thermal_list_lock FD: 1 BD: 2 +.+.: thermal_list_lock FD: 161 BD: 1 +.+.: cpuidle_lock ->&obj_hash[i].lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount FD: 140 BD: 1 +.+.: k-sk_lock-AF_QIPCRTR ->k-slock-AF_QIPCRTR ->fs_reclaim ->qrtr_ports.xa_lock ->pool_lock#2 ->qrtr_node_lock ->&obj_hash[i].lock FD: 1 BD: 2 +...: k-slock-AF_QIPCRTR FD: 1 BD: 2 +.+.: qrtr_ports.xa_lock FD: 1 BD: 2 +.+.: qrtr_node_lock FD: 138 BD: 150 ++++: (crypto_chain).rwsem ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->quarantine_lock ->&n->list_lock ->remove_cache_srcu ->&cfs_rq->removed.lock FD: 291 BD: 1 +.+.: iova_cache_mutex ->cpu_hotplug_lock ->slab_mutex FD: 3 BD: 1 +.+.: subsys mutex#7 ->&k->k_lock FD: 1 BD: 140 ....: pci_config_lock FD: 1 BD: 1 +.+.: subsys mutex#8 FD: 138 BD: 84 +.+.: dev_pm_qos_mtx ->fs_reclaim ->pool_lock#2 ->&dev->power.lock ->pm_qos_lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&rq->__lock FD: 1 BD: 85 ....: pm_qos_lock FD: 162 BD: 83 +.+.: dev_pm_qos_sysfs_mtx ->dev_pm_qos_mtx ->&root->kernfs_rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock FD: 137 BD: 1 +.+.: mtrr_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 ..-.: uidhash_lock FD: 1 BD: 7 +.+.: &bdev->bd_holder_lock FD: 144 BD: 1 +.+.: (work_completion)(&eval_map_work) ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock FD: 1 BD: 1 ....: oom_reaper_wait.lock FD: 1 BD: 1 +.+.: subsys mutex#9 FD: 28 BD: 4248 ....: &pgdat->kcompactd_wait ->&p->pi_lock FD: 142 BD: 2 +.+.: pcpu_balance_work ->pcpu_alloc_mutex ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 180 BD: 1 +.+.: memory_tier_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&____s->seqcount ->&zone->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#10 FD: 1 BD: 2 +.+.: subsys mutex#10 FD: 1 BD: 1 +.+.: ksm_thread_mutex FD: 1 BD: 1 ....: ksm_thread_wait.lock FD: 1 BD: 2 +.+.: damon_ops_lock FD: 139 BD: 149 ++++: crypto_alg_sem ->(crypto_chain).rwsem ->&rq->__lock FD: 42 BD: 4 +.+.: lock#3 ->&obj_hash[i].lock ->&rq->__lock ->(work_completion)(work) ->&x->wait#10 ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->pool_lock#2 FD: 1 BD: 137 +.+.: khugepaged_mm_lock FD: 28 BD: 137 ....: khugepaged_wait.lock ->&p->pi_lock FD: 1 BD: 4280 ..-.: quarantine_lock FD: 38 BD: 4237 .+.+: remove_cache_srcu ->quarantine_lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&____s->seqcount ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&base->lock ->&meta->lock ->kfence_freelist_lock 
->stock_lock FD: 155 BD: 2 +.+.: (work_completion)(&pwq->unbound_release_work) ->&wq->mutex ->wq_pool_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&pool->lock ->&rnp->exp_wq[0] ->&rq->__lock ->&rnp->exp_wq[1] ->&rnp->exp_wq[3] ->&rnp->exp_lock ->rcu_state.exp_mutex ->&rnp->exp_wq[2] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->pool_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 168 BD: 7 +.+.: bio_slab_lock ->fs_reclaim ->pool_lock#2 ->slab_mutex ->bio_slabs.xa_lock ->&rq->__lock FD: 2 BD: 8 +.+.: bio_slabs.xa_lock ->pool_lock#2 FD: 138 BD: 3 +.+.: major_names_lock ->fs_reclaim ->pool_lock#2 ->major_names_spinlock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 4 +.+.: major_names_spinlock FD: 3 BD: 14 +.+.: subsys mutex#11 ->&k->k_lock FD: 1 BD: 1 ....: *(&acpi_gbl_hardware_lock) FD: 39 BD: 1 ....: *(&acpi_gbl_gpe_lock) ->(console_sem).lock FD: 5 BD: 152 ....: mask_lock ->tmp_mask_lock FD: 4 BD: 153 -...: tmp_mask_lock ->vector_lock ->ioapic_lock FD: 1 BD: 1 -...: shrink_qlist.lock FD: 31 BD: 7 ....: remove_cache_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 34 BD: 134 +.+.: flush_lock ->&obj_hash[i].lock ->(work_completion)(&sfw->work) ->&x->wait#10 ->&rq->__lock FD: 10 BD: 136 +.+.: (work_completion)(&sfw->work) ->&c->lock ->&n->list_lock ->&obj_hash[i].lock FD: 31 BD: 135 +.+.: (wq_completion)slub_flushwq ->(work_completion)(&sfw->work) ->(work_completion)(&barr->work) FD: 28 BD: 4369 ....: &x->wait#10 ->&p->pi_lock FD: 29 BD: 142 +.+.: (work_completion)(&barr->work) ->&x->wait#10 ->&rq->__lock FD: 1 BD: 1 +.+.: system_transition_mutex FD: 1 BD: 1 +.+.: (power_off_prep_handler_list).rwsem FD: 1 BD: 1 ....: power_off_handler_list.lock FD: 1 BD: 1 +.+.: (restart_prep_handler_list).rwsem FD: 1 BD: 1 +.+.: (reboot_notifier_list).rwsem FD: 208 BD: 1 +.+.: acpi_scan_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->&c->lock ->&zone->lock ->&____s->seqcount ->acpi_device_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#12 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&n->list_lock ->*(&acpi_gbl_reference_count_lock) ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->pci_config_lock ->&meta->lock ->(console_sem).lock ->pci_bus_sem ->pci_mmcfg_lock ->resource_lock ->&device->physical_node_lock ->fwnode_link_lock ->devtree_lock ->gdp_mutex ->subsys mutex#13 ->pci_acpi_companion_lookup_sem ->pci_slot_mutex ->tk_core.seq.seqcount ->resource_alignment_lock ->device_links_srcu ->subsys mutex#14 ->acpi_pm_notifier_install_lock ->pci_rescan_remove_lock ->subsys mutex#3 ->acpi_link_lock ->acpi_dep_list_lock ->wakeup_ida.xa_lock ->subsys mutex#15 ->events_lock ->power_resource_list_lock FD: 139 BD: 2 +.+.: acpi_device_lock ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->quarantine_lock FD: 1 BD: 3 ....: &xa->xa_lock#2 FD: 1 BD: 2 +.+.: subsys mutex#12 FD: 1 BD: 2 ++++: pci_bus_sem FD: 1 BD: 2 +.+.: pci_mmcfg_lock FD: 161 BD: 12 +.+.: &device->physical_node_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount FD: 3 BD: 2 +.+.: subsys mutex#13 ->&k->k_lock FD: 1 BD: 2 .+.+: pci_acpi_companion_lookup_sem FD: 1 BD: 2 +.+.: pci_slot_mutex FD: 1 BD: 2 +.+.: resource_alignment_lock FD: 1 BD: 4301 ....: &dev->power.lock/1 FD: 1 BD: 2 +.+.: subsys 
mutex#14 FD: 185 BD: 2 +.+.: acpi_pm_notifier_install_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->*(&acpi_gbl_reference_count_lock) ->acpi_pm_notifier_lock FD: 182 BD: 3 +.+.: acpi_pm_notifier_lock ->fs_reclaim ->pool_lock#2 ->wakeup_ida.xa_lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#15 ->events_lock FD: 1 BD: 7 ....: wakeup_ida.xa_lock FD: 3 BD: 7 +.+.: subsys mutex#15 ->&k->k_lock FD: 1 BD: 7 ....: events_lock FD: 139 BD: 1 +.+.: &pgdat->kswapd_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock FD: 32 BD: 1 ..-.: drivers/char/random.c:251 FD: 15 BD: 2 +.+.: (next_reseed).work ->&obj_hash[i].lock ->&base->lock ->input_pool.lock ->base_crng.lock FD: 30 BD: 1 ..-.: mm/vmstat.c:2018 FD: 290 BD: 2 +.+.: (shepherd).work ->cpu_hotplug_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 37 BD: 5 +.+.: (wq_completion)mm_percpu_wq ->(work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->(work_completion)(work) ->(work_completion)(&barr->work) ->&rq->__lock FD: 28 BD: 6 +.+.: (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->&obj_hash[i].lock ->&base->lock ->&pcp->lock ->&rq->__lock FD: 36 BD: 2 +.+.: pci_rescan_remove_lock FD: 140 BD: 2 +.+.: acpi_link_lock ->fs_reclaim ->pool_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->*(&acpi_gbl_reference_count_lock) ->pci_config_lock ->&zone->lock ->&____s->seqcount ->(console_sem).lock ->&c->lock ->&rq->__lock FD: 1 BD: 2 +.+.: acpi_dep_list_lock FD: 1 BD: 2 +.+.: power_resource_list_lock FD: 185 BD: 7 ++++: &(&priv->bus_notifier)->rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->i2c_dev_list_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#66 FD: 147 BD: 1 +.+.: &type->s_umount_key#10/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#9 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#9 ->&dentry->d_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#11/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#10 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#10 ->&dentry->d_lock FD: 234 BD: 136 ++++: &mm->mmap_lock 
->reservation_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&mm->page_table_lock ->ptlock_ptr(page) ->&c->lock ->&anon_vma->rwsem ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->ptlock_ptr(page)#2 ->lock#4 ->lock#5 ->mmu_notifier_invalidate_range_start ->&vma->vm_lock->lock ->&obj_hash[i].lock ->&lruvec->lru_lock ->&rq->__lock ->quarantine_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->&mapping->i_mmap_rwsem ->resource_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&mm->mmap_lock/1 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&sem->wait_lock ->&p->pi_lock ->&n->list_lock ->remove_cache_srcu ->&folio_wait_table[i] ->pool_lock ->&rcu_state.expedited_wq ->khugepaged_mm_lock ->khugepaged_wait.lock ->&xa->xa_lock#7 ->&info->lock ->mount_lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->&kcov->lock ->&____s->seqcount#2 ->stock_lock ->sb_pagefaults ->&mapping->private_lock ->&sb->s_type->i_mutex_key#21 ->&hugetlbfs_i_mmap_rwsem_key ->&hugetlb_fault_mutex_table[i] ->hugetlb_lock ->&resv_map->lock ->&vma_lock->rw_sema ->&dd->lock ->&xa->xa_lock#13 ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->mapping.invalidate_lock ->&pgdat->kswapd_wait ->&po->pg_vec_lock FD: 143 BD: 153 +.+.: reservation_ww_class_acquire ->reservation_ww_class_mutex FD: 142 BD: 154 +.+.: reservation_ww_class_mutex ->fs_reclaim ->&shmem->vmap_lock FD: 68 BD: 4248 ++++: &mapping->i_mmap_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->quarantine_lock ->&anon_vma->rwsem ->&cfs_rq->removed.lock ->&sem->wait_lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->rcu_node_0 ->&meta->lock ->kfence_freelist_lock ->&p->pi_lock ->&rcu_state.expedited_wq ->ptlock_ptr(page)#2 ->&base->lock ->lock#4 ->lock#5 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock FD: 1 BD: 4262 +.+.: dma_fence_map FD: 30 BD: 3 +.+.: delayed_uprobe_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 4269 ....: key FD: 1 BD: 4 +.+.: attribute_container_mutex FD: 151 BD: 17 ++++: triggers_list_lock ->&led_cdev->trigger_lock FD: 151 BD: 17 ++++: leds_list_lock ->&led_cdev->trigger_lock FD: 191 BD: 2 ++++: (usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#60 ->mon_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock FD: 1 BD: 1 +.+.: rc_map_lock FD: 1 BD: 1 +.+.: subsys mutex#16 FD: 1 BD: 2 +.+.: &entry->access FD: 139 BD: 2 +.+.: info_mutex ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock FD: 1 BD: 183 +.+.: kobj_ns_type_lock FD: 13 BD: 79 +.+.: &xa->xa_lock#3 ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 29 BD: 73 +.+.: subsys mutex#17 ->&k->k_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 16 BD: 3784 ..-.: &dir->lock#2 ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&base->lock ->&meta->lock ->kfence_freelist_lock FD: 35 BD: 78 +.+.: dev_hotplug_mutex ->&dev->power.lock ->&rq->__lock 
->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 14 BD: 78 ++++: dev_base_lock ->&xa->xa_lock#3 FD: 1 BD: 69 ++++: qdisc_mod_lock FD: 20 BD: 1 ++++: bt_proto_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->chan_list_lock ->l2cap_sk_list.lock ->hci_sk_list.lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->hidp_sk_list.lock ->sco_sk_list.lock ->bnep_sk_list.lock FD: 149 BD: 13 +.+.: hci_cb_list_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->chan_list_lock ->&conn->ident_lock ->&base->lock ->&list->lock#9 ->&conn->chan_lock ->&rq->__lock ->&c->lock FD: 292 BD: 4 +.+.: mgmt_chan_list_lock ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->hci_dev_list_lock ->(console_sem).lock ->&rq->__lock ->&hdev->lock ->&data->lock ->fs_reclaim ->&c->lock ->rlock-AF_BLUETOOTH ->&n->list_lock FD: 1 BD: 3656 ....: &list->lock#2 FD: 137 BD: 71 +.+.: rate_ctrl_mutex ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: netlbl_domhsh_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: netlbl_unlhsh_lock FD: 196 BD: 1 +.+.: misc_mtx ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#18 ->misc_minors_ida.xa_lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&cfs_rq->removed.lock ->&base->lock ->&dir->lock ->rfkill_global_mutex ->&____s->seqcount#2 ->&n->list_lock ->remove_cache_srcu ->misc_mtx.wait_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 28 BD: 169 ....: &x->wait#11 ->&p->pi_lock FD: 148 BD: 1 .+.+: sb_writers ->mount_lock ->&type->i_mutex_dir_key/1 ->&sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&wb->list_lock ->&type->i_mutex_dir_key#2 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&dentry->d_lock ->tomoyo_ss ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&s->s_inode_list_lock ->&sbinfo->stat_lock ->&xa->xa_lock#7 ->&fsnotify_mark_srcu FD: 138 BD: 2 +.+.: &type->i_mutex_dir_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->&sbinfo->stat_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&sb->s_type->i_mutex_key#4 ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->tomoyo_ss ->&u->bindlock ->remove_cache_srcu ->&fsnotify_mark_srcu ->&rq->__lock ->&n->list_lock ->&sb->s_type->i_mutex_key#4/4 ->krc.lock ->&xa->xa_lock#7 ->&sem->wait_lock FD: 113 BD: 3 +.+.: &sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->tomoyo_ss ->&xattrs->lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_mutex_key#4/4 ->&sb->s_type->i_lock_key#5 ->&rq->__lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&xa->xa_lock#7 ->lock#4 ->&info->lock ->key#9 ->rcu_node_0 ->&sem->wait_lock ->&wb->list_lock ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 3 BD: 2 +.+.: subsys mutex#18 ->&k->k_lock FD: 200 BD: 6 +.+.: input_mutex ->&rq->__lock ->input_devices_poll_wait.lock ->fs_reclaim ->pool_lock#2 ->&dev->mutex#2 ->input_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock 
->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->&zone->lock ->&cfs_rq->removed.lock ->&led_cdev->led_access ->&mousedev->mutex/1 FD: 187 BD: 2 +.+.: (work_completion)(&rfkill_global_led_trigger_work) ->rfkill_global_mutex FD: 186 BD: 9 +.+.: rfkill_global_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->&rfkill->lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&k->k_lock ->subsys mutex#41 ->triggers_list_lock ->leds_list_lock ->&pool->lock ->rfkill_global_mutex.wait_lock ->&cfs_rq->removed.lock ->&n->list_lock ->&sem->wait_lock ->&p->pi_lock ->uevent_sock_mutex.wait_lock ->&data->mtx ->&____s->seqcount#2 ->remove_cache_srcu FD: 1 BD: 7 ....: input_devices_poll_wait.lock FD: 317 BD: 3 ++++: (netlink_chain).rwsem ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->reg_indoor_lock ->hwsim_radio_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&q->instances_lock ->&log->instances_lock ->&nft_net->commit_mutex ->&c->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 13 BD: 1 ++++: proto_tab_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 3 BD: 1 ....: random_ready_notifier.lock ->crngs.lock FD: 1 BD: 2 ....: misc_minors_ida.xa_lock FD: 40 BD: 1 ....: vga_lock#2 ->pci_config_lock ->(console_sem).lock FD: 187 BD: 1 +.+.: disable_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#3 FD: 147 BD: 1 +.+.: &type->s_umount_key#12/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#11 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#11 ->&dentry->d_lock FD: 291 BD: 2 +.+.: timer_update_work ->timer_keys_mutex FD: 290 BD: 3 +.+.: timer_keys_mutex ->cpu_hotplug_lock FD: 317 BD: 1 +.+.: (work_completion)(&tracerfs_init_work) ->pin_fs_lock ->fs_reclaim ->pool_lock#2 ->sb_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&type->s_umount_key#13/1 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->&obj_hash[i].lock ->&sb->s_type->i_mutex_key#5 ->event_mutex ->(module_notify_list).rwsem ->trace_types_lock FD: 147 BD: 2 +.+.: &type->s_umount_key#13/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#12 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 8 +.+.: &sb->s_type->i_lock_key#12 ->&dentry->d_lock FD: 139 BD: 6 +.+.: &sb->s_type->i_mutex_key#5 ->&sb->s_type->i_lock_key#12 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock 
->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock FD: 306 BD: 2 +.+.: event_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock ->trace_types_lock ->sched_register_mutex ->tracepoints_mutex ->&rq->__lock FD: 1 BD: 5 ....: trace_event_sem.wait_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#14/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#13 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#13 ->&dentry->d_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#15/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#14 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#14 ->&dentry->d_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#16/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#15 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 3 +.+.: &sb->s_type->i_lock_key#15 ->&dentry->d_lock FD: 138 BD: 1 +.+.: kclist_lock ->resource_lock ->fs_reclaim ->pool_lock#2 FD: 146 BD: 1 +.+.: &type->s_umount_key#17/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#16 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 54 BD: 4268 +.+.: &sb->s_type->i_lock_key#16 ->&dentry->d_lock ->&xa->xa_lock#7 FD: 240 BD: 33 .+.+: tomoyo_ss ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tomoyo_policy_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dentry->d_lock ->tomoyo_log_lock ->tomoyo_log_wait.lock ->&rq->__lock ->file_systems_lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->&mm->mmap_lock ->&meta->lock ->quarantine_lock ->rcu_node_0 ->&n->list_lock ->remove_cache_srcu ->rename_lock ->&cfs_rq->removed.lock ->&base->lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->mount_lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#18/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#17 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 4 +.+.: &sb->s_type->i_lock_key#17 ->&dentry->d_lock FD: 141 BD: 1 +.+.: &ns->lock ->&dentry->d_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#6 FD: 139 BD: 2 +.+.: &sb->s_type->i_mutex_key#6 ->&sb->s_type->i_lock_key#17 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount FD: 42 BD: 1 +.+.: &type->s_umount_key#19 ->sb_lock ->&dentry->d_lock FD: 137 BD: 1 +.+.: pnp_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#19 FD: 3 BD: 1 +.+.: subsys mutex#20 ->&k->k_lock FD: 3 BD: 10 +.+.: subsys mutex#21 ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#22 ->&k->k_lock FD: 366 BD: 1 +.+.: tty_mutex ->(console_sem).lock ->console_lock ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 
->&obj_hash[i].lock ->tty_ldiscs_lock ->&k->list_lock ->&k->k_lock ->&tty->legacy_mutex FD: 4 BD: 1 +.+.: subsys mutex#23 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 ....: netevent_notif_chain.lock FD: 213 BD: 11 ++++: clients_rwsem ->fs_reclaim ->clients.xa_lock ->&device->client_data_rwsem FD: 2 BD: 12 +.+.: clients.xa_lock ->pool_lock#2 FD: 855 BD: 10 ++++: devices_rwsem ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->devices.xa_lock ->&c->lock ->&obj_hash[i].lock ->(console_sem).lock ->clients_rwsem ->rdma_nets_rwsem ->&pdata->netdev_lock ->&table->lock#4 FD: 1 BD: 1 +.+.: (blocking_lsm_notifier_chain).rwsem FD: 211 BD: 69 ++++: (inetaddr_chain).rwsem ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&____s->seqcount ->&obj_hash[i].lock ->fib_info_lock ->&dir->lock#2 ->&c->lock ->nl_table_lock ->nl_table_wait.lock ->&net->sctp.local_addr_lock ->&rq->__lock ->rlock-AF_NETLINK ->&n->list_lock ->remove_cache_srcu ->&ipvlan->addrs_lock ->&____s->seqcount#2 ->krc.lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->stock_lock FD: 1 BD: 5 ....: inet6addr_chain.lock FD: 1 BD: 1 +.+.: buses_mutex FD: 1 BD: 1 +.+.: offload_lock FD: 1 BD: 1 +...: inetsw_lock FD: 855 BD: 1 +.+.: (wq_completion)events_power_efficient ->(work_completion)(&(&tbl->managed_work)->work) ->(check_lifetime_work).work ->(work_completion)(&(&cache_cleaner)->work) ->(work_completion)(&(&ops->cursor_work)->work) ->(work_completion)(&(&hub->init_work)->work) ->(work_completion)(&(&gc_work->dwork)->work) ->(work_completion)(&(&tbl->gc_work)->work) ->(reg_check_chans).work ->(crda_timeout).work ->(gc_work).work ->&rq->__lock ->(work_completion)(&(&hinfo->gc_work)->work) ->(work_completion)(&barr->work) FD: 49 BD: 2 +.+.: (work_completion)(&(&tbl->managed_work)->work) ->&tbl->lock ->&rq->__lock FD: 48 BD: 3657 ++-.: &tbl->lock ->&obj_hash[i].lock ->&base->lock ->&c->lock ->pool_lock#2 ->&____s->seqcount ->batched_entropy_u32.lock ->&n->lock ->nl_table_lock ->nl_table_wait.lock ->&dir->lock#2 ->krc.lock ->&n->list_lock ->rlock-AF_NETLINK ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 73 +.+.: ptype_lock FD: 28 BD: 2 +.+.: (check_lifetime_work).work ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 69 +.+.: &net->rules_mod_lock FD: 1 BD: 1 +.+.: tcp_ulp_list_lock FD: 1 BD: 1 +...: xfrm_state_afinfo_lock FD: 1 BD: 1 +.+.: xfrm_policy_afinfo_lock FD: 1 BD: 1 +...: xfrm_input_afinfo_lock FD: 18 BD: 4389 ..-.: krc.lock ->&obj_hash[i].lock ->hrtimer_bases.lock ->&base->lock FD: 141 BD: 1 +.+.: (wq_completion)events_highpri ->(work_completion)(&(&krcp->page_cache_work)->work) ->(work_completion)(flush) ->(work_completion)(&barr->work) FD: 137 BD: 3 +.+.: (work_completion)(&(&krcp->page_cache_work)->work) ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->krc.lock FD: 1 BD: 3 +.+.: &hashinfo->lock FD: 1 BD: 1 +.+.: tcp_cong_list_lock FD: 2 BD: 7 +.+.: cache_list_lock ->&cd->hash_lock FD: 1 BD: 1 +.+.: (rpc_pipefs_notifier_list).rwsem FD: 1 BD: 1 +.+.: svc_xprt_class_lock FD: 39 BD: 1 +.+.: xprt_list_lock ->(console_sem).lock FD: 29 BD: 2 +.+.: (work_completion)(&(&cache_cleaner)->work) ->cache_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 14 BD: 1 +.-.: (&tcp_orphan_timer) ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 1 ....: pcibios_fwaddrmap_lock FD: 142 BD: 3 .+.+: sb_writers#2 ->mount_lock ->&sb->s_type->i_mutex_key/1 ->&sb->s_type->i_mutex_key FD: 138 BD: 4 +.+.: 
&sb->s_type->i_mutex_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->tomoyo_ss ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&sb->s_type->i_mutex_key FD: 1 BD: 2 +.+.: tomoyo_log_lock FD: 1 BD: 2 ....: tomoyo_log_wait.lock FD: 67 BD: 4267 +.+.: &wb->list_lock ->&sb->s_type->i_lock_key#2 ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#8 ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#3 ->&sb->s_type->i_lock_key#16 ->&sb->s_type->i_lock_key#27 ->&sb->s_type->i_lock_key#31 ->&sb->s_type->i_lock_key#30 FD: 190 BD: 3 ++++: umhelper_sem ->usermodehelper_disabled_waitq.lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->subsys mutex#80 ->fw_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&x->wait#23 ->&base->lock ->(&timer.timer) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->remove_cache_srcu FD: 1 BD: 4 ....: usermodehelper_disabled_waitq.lock FD: 207 BD: 2 +.+.: (work_completion)(&sub_info->work) ->&sighand->siglock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->batched_entropy_u64.lock ->&obj_hash[i].lock ->&c->lock ->init_files.file_lock ->init_fs.lock ->&p->alloc_lock ->lock ->pidmap_lock ->cgroup_threadgroup_rwsem ->input_pool.lock ->&p->pi_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&sig->wait_chldexit ->tasklist_lock ->&prev->lock ->css_set_lock ->&x->wait#17 ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->&meta->lock ->remove_cache_srcu FD: 1 BD: 1 +.+.: &drv->dynids.lock FD: 1 BD: 1 +.+.: umh_sysctl_lock FD: 67 BD: 4229 ++++: &anon_vma->rwsem ->&mm->page_table_lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&sem->wait_lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->&rcu_state.gp_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&base->lock ->ptlock_ptr(page)#2 ->stock_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->batched_entropy_u8.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4834 -.-.: per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 10 BD: 4266 +.+.: lock#4 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&pcp->lock ->lock#10 FD: 278 BD: 1 +.+.: &sig->cred_guard_mutex ->fs_reclaim ->pool_lock#2 ->&fs->lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->delayed_uprobe_lock ->&mm->mmap_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->pool_lock ->&n->list_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&dentry->d_lock/1 ->&meta->lock ->init_fs.lock ->&type->i_mutex_dir_key#3 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->&p->pi_lock ->aa_buffers_lock ->mapping.invalidate_lock ->&folio_wait_table[i] ->tomoyo_ss ->&iint->mutex ->binfmt_lock ->entries_lock ->&ei->xattr_sem 
->&tsk->futex_exit_mutex ->&sig->exec_update_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&lock->wait_lock ->key#5 ->&stopper->lock ->&stop_pi_lock ->&x->wait#8 ->remove_cache_srcu ->&____s->seqcount#2 FD: 2 BD: 4283 ..-.: &lruvec->lru_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 6 BD: 4260 +.+.: lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock FD: 136 BD: 138 ++++: &vma->vm_lock->lock ->&rq->__lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->ptlock_ptr(page)#2 ->mmu_notifier_invalidate_range_start ->&lruvec->lru_lock ->&obj_hash[i].lock ->rcu_node_0 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->ptlock_ptr(page) ->&rcu_state.gp_wq ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&p->pi_lock ->stock_lock ->&rcu_state.expedited_wq ->&n->list_lock ->lock#4 ->lock#5 FD: 235 BD: 2 +.+.: &tsk->futex_exit_mutex ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&mm->mmap_lock ->rcu_node_0 FD: 30 BD: 1 +.+.: &child->perf_event_mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 139 ....: &pid->wait_pidfd FD: 28 BD: 154 ....: &sig->wait_chldexit ->&p->pi_lock FD: 15 BD: 154 ....: &(&sig->stats_lock)->lock ->&____s->seqcount#5 FD: 14 BD: 155 ....: &____s->seqcount#5 ->pidmap_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 39 BD: 1 +.+.: low_water_lock ->(console_sem).lock ->console_owner_lock ->console_owner FD: 293 BD: 1 +.+.: vendor_module_lock ->slab_mutex ->pcpu_alloc_mutex ->&obj_hash[i].lock ->percpu_counters_lock ->fs_reclaim ->pool_lock#2 ->shrinker_rwsem ->&zone->lock ->&____s->seqcount ->cpu_hotplug_lock ->timekeeper_lock FD: 30 BD: 1 ..-.: &(&cache_cleaner)->timer FD: 1 BD: 4869 -.-.: pvclock_gtod_data FD: 144 BD: 3 ++++: &type->i_mutex_dir_key#2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->namespace_sem ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&n->list_lock ->&sem->wait_lock ->&rq->__lock ->remove_cache_srcu ->&xa->xa_lock#13 ->&obj_hash[i].lock ->stock_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->&____s->seqcount#2 FD: 3 BD: 136 +.+.: subsys mutex#24 ->&k->k_lock FD: 3 BD: 136 +.+.: subsys mutex#25 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#26 FD: 197 BD: 1 +.+.: subsys mutex#27 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->platform_devid_ida.xa_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#3 ->&rq->__lock ->wakeup_ida.xa_lock ->gdp_mutex ->subsys mutex#15 ->events_lock ->rtcdev_lock FD: 1 BD: 1 +.+.: subsys mutex#28 FD: 37 BD: 2 +.+.: (work_completion)(&p->wq) ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->quarantine_lock ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->rcu_node_0 ->&base->lock ->&rcu_state.expedited_wq FD: 30 BD: 1 ..-.: &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); }))->timer FD: 32 BD: 1 ..-.: mm/memcontrol.c:589 FD: 29 BD: 2 +.+.: (stats_flush_dwork).work ->cgroup_rstat_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 FD: 2 BD: 19 ....: cgroup_rstat_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 30 BD: 1 ..-.: &(&group->avgs_work)->timer FD: 28 BD: 2 +.+.: (work_completion)(&(&group->avgs_work)->work) ->&group->avgs_lock ->&rq->__lock FD: 27 BD: 3 +.+.: &group->avgs_lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 30 BD: 1 ..-.: &(&krcp->monitor_work)->timer FD: 30 BD: 1 ..-.: &(&tbl->managed_work)->timer FD: 34 BD: 2 +.+.: (work_completion)(&(&krcp->monitor_work)->work) ->krc.lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 137 +.+.: subsys mutex#29 FD: 1 BD: 4 +.+.: key_user_lock FD: 1 BD: 4 +.+.: key_serial_lock FD: 5 BD: 5 +.+.: key_construction_mutex ->&obj_hash[i].lock ->pool_lock#2 ->keyring_name_lock FD: 144 BD: 3 +.+.: &type->lock_class ->keyring_serialise_link_lock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->key_user_lock ->crngs.lock ->key_serial_lock ->key_construction_mutex ->ima_keys_lock FD: 140 BD: 4 +.+.: keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->root_key_user.lock ->key_construction_mutex FD: 28 BD: 4258 ....: &pgdat->kswapd_wait ->&p->pi_lock FD: 1 BD: 1 +.+.: drivers_lock FD: 145 BD: 1 +.+.: damon_dbgfs_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->damon_ops_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 147 BD: 1 +.+.: &type->s_umount_key#20/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#18 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#18 ->&dentry->d_lock FD: 1 BD: 1 +.+.: dq_list_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#21/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->&____s->seqcount ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 7 +.+.: &sb->s_type->i_lock_key#19 ->&dentry->d_lock FD: 1 BD: 1 +.+.: configfs_subsystem_mutex FD: 146 BD: 1 +.+.: &sb->s_type->i_mutex_key#7/1 ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]/2 ->&zone->lock ->&default_group_class[depth - 1]#2 ->&obj_hash[i].lock FD: 1 BD: 8 +.+.: configfs_dirent_lock FD: 144 BD: 2 +.+.: &default_group_class[depth - 1]/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#3/2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 137 BD: 1 +.+.: ecryptfs_daemon_hash_mux ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: ecryptfs_msg_ctx_lists_mux ->&ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 2 +.+.: &ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 1 ....: &ecryptfs_kthread_ctl.wait FD: 1 BD: 1 +.+.: nfs_version_lock FD: 152 BD: 1 ++++: key_types_sem ->(console_sem).lock ->&rq->__lock ->asymmetric_key_parsers_sem ->&type->lock_class ->&obj_hash[i].lock 
->pool_lock#2 FD: 1 BD: 1 +.+.: pnfs_spinlock FD: 27 BD: 5 +.+.: &sn->pipefs_sb_lock ->&rq->__lock FD: 1 BD: 1 +.+.: nls_lock FD: 1 BD: 1 +.+.: jffs2_compressor_list_lock FD: 1 BD: 1 +.+.: next_tag_value_lock FD: 1 BD: 1 ....: log_redrive_lock FD: 2 BD: 1 ....: &TxAnchor.LazyLock ->jfs_commit_thread_wait.lock FD: 1 BD: 2 ....: jfs_commit_thread_wait.lock FD: 1 BD: 1 +.+.: jfsTxnLock FD: 39 BD: 1 +.+.: ocfs2_stack_lock ->(console_sem).lock FD: 1 BD: 1 +.+.: o2hb_callback_sem FD: 1 BD: 1 +.+.: o2net_handler_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#22/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock ->&xa->xa_lock#13 ->&rq->__lock ->&obj_hash[i].lock ->stock_lock ->&n->list_lock ->&____s->seqcount#2 ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock FD: 40 BD: 5 +.+.: &sb->s_type->i_lock_key#20 ->&dentry->d_lock FD: 289 BD: 77 +.+.: nf_hook_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&zone->lock ->&obj_hash[i].lock ->stock_lock ->&n->list_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->cpu_hotplug_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock FD: 137 BD: 1 ++++: alg_types_sem ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: dma_list_mutex FD: 143 BD: 2 ++++: asymmetric_key_parsers_sem ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->crypto_alg_sem ->&obj_hash[i].lock ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&zone->lock ->&____s->seqcount ->&cfs_rq->removed.lock FD: 841 BD: 1 +.+.: blkcg_pol_register_mutex ->blkcg_pol_mutex ->cgroup_mutex FD: 1 BD: 4 +.+.: elv_list_lock FD: 141 BD: 1 +.+.: crc_t10dif_mutex ->crypto_alg_sem ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock FD: 141 BD: 1 +.+.: crc64_rocksoft_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ts_mod_lock FD: 3 BD: 7 +.+.: subsys mutex#30 ->&k->k_lock FD: 37 BD: 10 +.+.: &dev->mutex#2 ->&obj_hash[i].lock ->&rq->__lock ->&rnp->exp_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&lock->wait_lock FD: 31 BD: 7 ....: wakeup_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 3 ....: wakeup_srcu FD: 1 BD: 3 ....: (&ws->timer) FD: 1 BD: 274 +.+.: klist_remove_lock FD: 5 BD: 3918 ....: &ws->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 1 BD: 3 ....: deleted_ws.lock FD: 174 BD: 1 +.+.: register_count_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->&c->lock ->&____s->seqcount ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock FD: 1 BD: 1 +.+.: (cpufreq_policy_notifier_list).rwsem FD: 1 BD: 1 +.+.: cpuidle_driver_lock FD: 1 BD: 1 ....: thermal_cdev_ida.xa_lock FD: 1 BD: 5 ....: cpufreq_driver_lock FD: 3 BD: 1 +.+.: subsys mutex#31 ->&k->k_lock FD: 1 BD: 219 +.+.: pcpu_alloc_mutex.wait_lock FD: 1 BD: 1 +.+.: (x86_mce_decoder_chain).rwsem FD: 1 BD: 1 ....: virtio_index_ida.xa_lock FD: 1 BD: 1 +.+.: subsys mutex#32 FD: 179 BD: 134 +.+.: &md->mutex ->fs_reclaim ->pool_lock#2 ->irq_domain_mutex ->pci_config_lock ->&xa->xa_lock#4 ->&domain->mutex ->&irq_desc_lock_class ->vector_lock ->&root->kernfs_rwsem ->lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 2 BD: 135 +.+.: &xa->xa_lock#4 ->pool_lock#2 FD: 1 BD: 1 
+.+.: &dev->vqs_list_lock FD: 1 BD: 1 ....: &vp_dev->lock FD: 1 BD: 1 +.+.: (oom_notify_list).rwsem FD: 1 BD: 1 ....: &dev->config_lock FD: 1 BD: 1 +.+.: vdpa_dev_lock FD: 3 BD: 1 +.+.: subsys mutex#33 ->&k->k_lock FD: 30 BD: 1 -.-.: &vb->stop_update_lock FD: 291 BD: 1 +.+.: (wq_completion)events_freezable ->(work_completion)(&vb->update_balloon_stats_work) ->&rq->__lock FD: 290 BD: 2 +.+.: (work_completion)(&vb->update_balloon_stats_work) ->cpu_hotplug_lock ->&s->s_inode_list_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 29 BD: 4326 +.+.: &dentry->d_lock/1 ->&lru->node[i].lock FD: 266 BD: 1 +.+.: serial_mutex ->gpio_lookup_lock ->port_mutex FD: 1 BD: 2 +.+.: gpio_lookup_lock FD: 264 BD: 2 +.+.: port_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#34 ->&zone->lock ->&port->mutex FD: 1 BD: 3 +.+.: subsys mutex#34 FD: 1 BD: 4301 ....: &dev->power.wait_queue FD: 150 BD: 1 +.+.: (wq_completion)pm ->(work_completion)(&dev->power.work) FD: 149 BD: 2 +.+.: (work_completion)(&dev->power.work) ->&dev->power.lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->hcd_root_hub_lock ->fs_reclaim ->&vhci_hcd->vhci->lock ->&obj_hash[i].lock ->&x->wait#19 ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&zone->lock ->&____s->seqcount ->&hub->irq_urb_lock ->(&hub->irq_urb_retry) ->hcd_urb_unlink_lock ->device_state_lock ->hcd_urb_list_lock ->usb_kill_urb_queue.lock ->(work_completion)(&hub->tt.clear_work) ->&port_lock_key FD: 259 BD: 9 +.+.: &port->mutex ->fs_reclaim ->pool_lock#2 ->console_mutex ->resource_lock ->&port_lock_key ->(console_sem).lock ->ctrl_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&dev->power.lock ->&k->list_lock ->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#35 ->semaphore->lock ->&c->lock ->&zone->lock ->&____s->seqcount ->*(&acpi_gbl_reference_count_lock) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->gdp_mutex ->req_lock ->&p->pi_lock ->&x->wait#11 ->subsys mutex#21 ->chrdevs_lock ->hash_mutex ->&i->lock ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock FD: 1 BD: 10 ....: ctrl_ida.xa_lock FD: 1 BD: 10 +.+.: subsys mutex#35 FD: 1 BD: 1 ....: rng_index_ida.xa_lock FD: 140 BD: 1 +.+.: rng_mutex ->&x->wait#13 ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock FD: 28 BD: 2 -.-.: &x->wait#12 ->&p->pi_lock FD: 1 BD: 2 ....: &x->wait#13 FD: 30 BD: 1 +.+.: reading_mutex ->reading_mutex.wait_lock ->&x->wait#12 ->&rq->__lock FD: 1 BD: 2 +.+.: reading_mutex.wait_lock FD: 1 BD: 1 ....: &dev->managed.lock FD: 147 BD: 1 +.+.: &type->s_umount_key#23/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#21 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#21 ->&dentry->d_lock FD: 2 BD: 238 ....: drm_minor_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &dev->debugfs_mutex FD: 3 BD: 3 +.+.: subsys mutex#36 ->&k->k_lock FD: 1 BD: 
1 ....: (worker)->lock FD: 137 BD: 24 +.+.: &dev->mode_config.idr_mutex ->fs_reclaim ->pool_lock#2 FD: 160 BD: 20 +.+.: crtc_ww_class_acquire ->crtc_ww_class_mutex ->fs_reclaim ->pool_lock#2 FD: 159 BD: 21 +.+.: crtc_ww_class_mutex ->reservation_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.blob_lock ->&crtc->commit_lock ->reservation_ww_class_mutex ->tk_core.seq.seqcount ->&vkms_out->lock ->&dev->vbl_lock ->&x->wait#14 ->(work_completion)(&vkms_state->composer_work) ->&base->lock ->&rq->__lock ->(&timer.timer) ->(work_completion)(&vkms_state->composer_work)#2 FD: 1 BD: 22 +.+.: &dev->mode_config.blob_lock FD: 1 BD: 1 ....: &xa->xa_lock#5 FD: 1 BD: 1 ....: &xa->xa_lock#6 FD: 1 BD: 23 ....: &dev->mode_config.connector_list_lock FD: 20 BD: 25 ..-.: &dev->vbl_lock ->&dev->vblank_time_lock FD: 190 BD: 1 .+.+: drm_connector_list_iter ->&dev->mode_config.connector_list_lock ->fs_reclaim ->pool_lock#2 ->&connector->mutex ->&c->lock ->&____s->seqcount FD: 188 BD: 2 +.+.: &connector->mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&zone->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&k->k_lock ->subsys mutex#36 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&dev->mode_config.idr_mutex ->&cfs_rq->removed.lock ->connector_list_lock FD: 1 BD: 3 +.+.: connector_list_lock FD: 1 BD: 1 +.+.: &dev->filelist_mutex FD: 231 BD: 1 +.+.: &dev->clientlist_mutex ->&helper->lock ->registration_lock ->(console_sem).lock ->kernel_fb_helper_lock FD: 189 BD: 16 +.+.: &helper->lock ->fs_reclaim ->pool_lock#2 ->&client->modeset_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&mgr->vm_lock ->&dev->object_name_lock ->&node->vm_lock ->&file_private->table_lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.fb_lock ->&file->fbs_lock ->&prime_fpriv->lock ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->init_mm.page_table_lock ->&rq->__lock ->&dev->master_mutex ->&lock->wait_lock ->&pool->lock ->reservation_ww_class_mutex FD: 162 BD: 18 +.+.: &client->modeset_mutex ->&dev->mode_config.mutex ->fs_reclaim ->pool_lock#2 ->crtc_ww_class_acquire FD: 161 BD: 19 +.+.: &dev->mode_config.mutex ->crtc_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount FD: 2 BD: 17 +.+.: &mgr->vm_lock ->pool_lock#2 FD: 36 BD: 17 +.+.: &dev->object_name_lock ->lock FD: 4 BD: 238 +.+.: &file_private->table_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 4 BD: 17 +.+.: &node->vm_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 17 +.+.: &dev->mode_config.fb_lock FD: 1 BD: 17 +.+.: &file->fbs_lock FD: 1 BD: 17 +.+.: &prime_fpriv->lock FD: 229 BD: 2 +.+.: registration_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->&zone->lock ->&____s->seqcount ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#11 ->&c->lock ->vt_switch_mutex ->(console_sem).lock ->console_lock FD: 137 BD: 3 +.+.: vt_switch_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 14 
+.+.: &fb_info->lock FD: 163 BD: 17 +.+.: &dev->master_mutex ->&client->modeset_mutex FD: 1 BD: 22 +.+.: &crtc->commit_lock FD: 141 BD: 155 +.+.: &shmem->vmap_lock ->&shmem->pages_lock ->fs_reclaim ->&____s->seqcount ->&zone->lock ->pool_lock#2 ->&obj_hash[i].lock ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock FD: 137 BD: 156 +.+.: &shmem->pages_lock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->&info->lock FD: 41 BD: 4287 ..-.: &xa->xa_lock#7 ->pool_lock#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->key#10 ->&s->s_inode_wblist_lock ->&base->lock ->key#11 ->&wb->work_lock ->&n->list_lock ->key#13 ->&pl->lock ->stock_lock ->&xa->xa_lock#13 ->&____s->seqcount#2 ->key#30 FD: 2 BD: 4234 ....: &info->lock ->key#9 FD: 35 BD: 22 -.-.: &vkms_out->lock ->&dev->event_lock FD: 34 BD: 23 -.-.: &dev->event_lock ->&dev->vbl_lock ->&____s->seqcount#6 ->&x->wait#14 ->&obj_hash[i].lock ->pool_lock#2 ->&dev->vblank_time_lock ->&vblank->queue ->&base->lock FD: 1 BD: 28 ----: &____s->seqcount#6 FD: 28 BD: 24 -...: &x->wait#14 ->&p->pi_lock FD: 19 BD: 26 -.-.: &dev->vblank_time_lock ->tk_core.seq.seqcount ->&(&vblank->seqlock)->lock ->&obj_hash[i].lock ->hrtimer_bases.lock FD: 2 BD: 27 -.-.: &(&vblank->seqlock)->lock ->&____s->seqcount#6 FD: 1 BD: 22 +.+.: (work_completion)(&vkms_state->composer_work) FD: 1 BD: 18 ....: &helper->damage_lock FD: 191 BD: 2 +.+.: (work_completion)(&helper->damage_work) ->&helper->damage_lock ->&helper->lock FD: 1 BD: 3784 +.+.: &lock->wait_lock FD: 1 BD: 24 -.-.: &vblank->queue FD: 1 BD: 22 +.+.: (work_completion)(&vkms_state->composer_work)#2 FD: 1 BD: 14 ....: vt_event_lock FD: 1 BD: 2 +.+.: kernel_fb_helper_lock FD: 1 BD: 1 +...: &dev->queue_lock FD: 1 BD: 8 ....: blk_queue_ida.xa_lock FD: 309 BD: 10 +.+.: &q->sysfs_lock ->&q->unused_hctx_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->cpu_hotplug_lock ->fs_reclaim ->&xa->xa_lock#8 ->&q->debugfs_mutex ->pcpu_alloc_mutex ->&q->rq_qos_mutex ->&stats->lock ->lock ->&root->kernfs_rwsem ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&q->queue_lock ->&tags->lock ->&eq->sysfs_lock FD: 1 BD: 11 +.+.: &q->unused_hctx_lock FD: 2 BD: 13 +.+.: &xa->xa_lock#8 ->pool_lock#2 FD: 31 BD: 7 +.+.: &set->tag_list_lock ->&q->mq_freeze_lock ->percpu_ref_switch_lock FD: 2 BD: 8 +.+.: &xa->xa_lock#9 ->pool_lock#2 FD: 22 BD: 297 ....: &q->queue_lock ->&blkcg->lock ->pool_lock#2 ->pcpu_lock ->&obj_hash[i].lock ->percpu_counters_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock FD: 21 BD: 298 ....: &blkcg->lock ->pool_lock#2 ->percpu_ref_switch_lock ->(&sq->pending_timer) ->&obj_hash[i].lock ->&base->lock ->percpu_counters_lock ->pcpu_lock ->pool_lock FD: 29 BD: 13 +.+.: &q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->mq_freeze_wq ->&rq->__lock FD: 5 BD: 304 ..-.: percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 305 ..-.: &q->mq_freeze_wq FD: 1 BD: 13 +.+.: &bdev->bd_size_lock FD: 3 BD: 8 +.+.: subsys mutex#37 ->&k->k_lock FD: 310 BD: 8 +.+.: &q->sysfs_dir_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->batched_entropy_u8.lock ->kfence_freelist_lock ->&q->sysfs_lock ->&zone->lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&rq->__lock FD: 139 BD: 13 +.+.: 
&q->debugfs_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->mount_lock FD: 3 BD: 7 +.+.: subsys mutex#38 ->&k->k_lock FD: 1 BD: 8 ....: cgwb_lock FD: 1 BD: 7 +...: bdi_lock FD: 61 BD: 4258 +.+.: inode_hash_lock ->&sb->s_type->i_lock_key#3 ->&sb->s_type->i_lock_key#22 ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 FD: 2 BD: 6 +.+.: bdev_lock ->&bdev->bd_holder_lock FD: 332 BD: 5 +.+.: &disk->open_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&xa->xa_lock#7 ->lock#4 ->mmu_notifier_invalidate_range_start ->&c->lock ->&mapping->private_lock ->tk_core.seq.seqcount ->&ret->b_uptodate_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&base->lock ->&hctx->lock ->&x->wait#16 ->&rq->__lock ->(&timer.timer) ->&q->sysfs_dir_lock ->&bdev->bd_size_lock ->&dd->lock ->&folio_wait_table[i] ->(console_sem).lock ->&s->s_inode_list_lock ->pcpu_alloc_mutex ->&x->wait#9 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&k->k_lock ->subsys mutex#37 ->&xa->xa_lock#9 ->inode_hash_lock ->bdev_lock ->&meta->lock ->kfence_freelist_lock ->&lo->lo_mutex ->nbd_index_mutex ->&nbd->config_lock ->&new->lock ->&lock->wait_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->sb_lock ->&rcu_state.expedited_wq FD: 43 BD: 4261 +.+.: &mapping->private_lock ->&xa->xa_lock#7 FD: 29 BD: 8 ..-.: &ret->b_uptodate_lock ->bit_wait_table + i FD: 14 BD: 5 ....: floppy_lock ->&obj_hash[i].lock ->&base->lock FD: 26 BD: 2 +.+.: (work_completion)(&blkg->free_work) ->&q->blkcg_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#8 ->pcpu_lock ->blk_queue_ida.xa_lock ->percpu_ref_switch_lock FD: 28 BD: 4 ....: command_done.lock ->&p->pi_lock FD: 17 BD: 2 +.+.: floppy_work ->dma_spin_lock ->floppy_lock ->&obj_hash[i].lock ->fdc_wait.lock FD: 1 BD: 3 ....: dma_spin_lock FD: 137 BD: 1 +.+.: loop_ctl_mutex ->fs_reclaim ->pool_lock#2 FD: 147 BD: 11 +.+.: &q->rq_qos_mutex ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->debugfs_mutex ->set->srcu ->&stats->lock ->&rq->__lock ->(&cb->timer) ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 FD: 1 BD: 12 ....: &stats->lock FD: 30 BD: 1 ..-.: &(&ops->cursor_work)->timer FD: 32 BD: 2 +.+.: (work_completion)(&(&ops->cursor_work)->work) ->(console_sem).lock ->&obj_hash[i].lock ->&base->lock FD: 147 BD: 9 +.+.: nbd_index_mutex ->fs_reclaim ->pool_lock#2 ->&nbd->config_lock FD: 1 BD: 17 .+.+: set->srcu FD: 34 BD: 8 +.+.: (work_completion)(&(&q->requeue_work)->work) ->&q->requeue_lock ->&hctx->lock ->&dd->lock FD: 18 BD: 8 +.+.: (work_completion)(&(&hctx->run_work)->work) FD: 322 BD: 1 +.+.: zram_index_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->blk_queue_ida.xa_lock ->&obj_hash[i].lock ->pcpu_alloc_mutex ->bio_slab_lock ->percpu_counters_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->lock ->&q->queue_lock ->&x->wait#9 ->&bdev->bd_size_lock ->&k->list_lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock 
->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->subsys mutex#37 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->percpu_ref_switch_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#38 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->inode_hash_lock ->&cfs_rq->removed.lock ->(console_sem).lock FD: 3 BD: 1 +.+.: subsys mutex#39 ->&k->k_lock FD: 138 BD: 2 +.+.: &default_group_class[depth - 1]#2 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 2 BD: 1 +.+.: &lock ->nullb_indexes.xa_lock FD: 1 BD: 2 ....: nullb_indexes.xa_lock FD: 1 BD: 1 +.+.: ctx_list.lock FD: 1 BD: 1 ....: nfc_index_ida.xa_lock FD: 181 BD: 3 +.+.: nfc_devlist_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock ->subsys mutex#40 ->&k->k_lock ->&genl_data->genl_data_mutex FD: 3 BD: 4 +.+.: subsys mutex#40 ->&k->k_lock FD: 1 BD: 76 ....: &rfkill->lock FD: 3 BD: 10 +.+.: subsys mutex#41 ->&k->k_lock FD: 187 BD: 2 +.+.: (work_completion)(&rfkill->sync_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 10 +.+.: rfkill_global_mutex.wait_lock FD: 1 BD: 1 +.+.: dma_heap_minors.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#42 ->&k->k_lock FD: 1 BD: 1 +.+.: heap_list_lock FD: 1 BD: 1 ....: host_index_ida.xa_lock FD: 167 BD: 1 +.+.: scsi_sense_cache_mutex ->slab_mutex FD: 27 BD: 4 +.+.: subsys mutex#43 ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#44 ->&k->k_lock FD: 1 BD: 167 -.-.: &virtscsi_vq->vq_lock FD: 340 BD: 3 +.+.: &shost->scan_mutex ->fs_reclaim ->pool_lock#2 ->shost->host_lock ->&dev->power.lock ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->attribute_container_mutex ->blk_queue_ida.xa_lock ->pcpu_alloc_mutex ->&q->sysfs_lock ->&set->tag_list_lock ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&hctx->lock ->&base->lock ->&x->wait#16 ->&rq->__lock ->(&timer.timer) ->&sdev->state_mutex ->&q->mq_freeze_lock ->&q->mq_freeze_wq ->percpu_ref_switch_lock ->(&q->timeout) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->cpu_hotplug_lock ->&xa->xa_lock#8 ->&q->unused_hctx_lock ->(work_completion)(&sdev->requeue_work) ->(work_completion)(&sdev->event_work) ->pcpu_lock ->&sdev->inquiry_mutex ->(console_sem).lock ->&tags->lock ->&cfs_rq->removed.lock ->quarantine_lock ->&x->wait#15 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#43 ->device_links_srcu ->async_lock ->gdp_mutex ->subsys mutex#45 ->bsg_minor_ida.xa_lock ->chrdevs_lock ->req_lock ->&p->pi_lock ->&x->wait#11 ->subsys mutex#55 FD: 1 BD: 4 ....: shost->host_lock FD: 2 BD: 3 +.+.: async_scan_lock ->&x->wait#15 FD: 1 BD: 5 ....: &x->wait#15 FD: 1 BD: 156 +.+.: &hctx->lock FD: 28 BD: 7 ..-.: &x->wait#16 ->&p->pi_lock FD: 1 BD: 4 +.+.: &sdev->state_mutex FD: 30 BD: 6 +.-.: (&q->timeout) FD: 15 BD: 7 +.+.: (work_completion)(&q->timeout_work) ->&tags->lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4 +.+.: (work_completion)(&sdev->requeue_work) FD: 1 BD: 4 +.+.: (work_completion)(&sdev->event_work) FD: 1 BD: 4 +.+.: &sdev->inquiry_mutex FD: 184 BD: 4 +.+.: subsys mutex#45 ->&k->list_lock ->&k->k_lock ->fs_reclaim 
->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->lock ->chrdevs_lock ->&x->wait#9 ->&obj_hash[i].lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#54 ->(console_sem).lock ->console_owner_lock ->console_owner FD: 1 BD: 1 +.+.: nvmf_hosts_mutex FD: 1 BD: 13 ....: &tags->lock FD: 3 BD: 1 +.+.: subsys mutex#46 ->&k->k_lock FD: 1 BD: 1 +.+.: nvmf_transports_rwsem FD: 3 BD: 1 +.+.: subsys mutex#47 ->&k->k_lock FD: 143 BD: 3 +.+.: &default_group_class[depth - 1]#3/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#4/2 FD: 1 BD: 1 +.+.: nvmet_config_sem FD: 3 BD: 1 +.+.: subsys mutex#48 ->&k->k_lock FD: 142 BD: 4 +.+.: &default_group_class[depth - 1]#4/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#5/2 FD: 141 BD: 5 +.+.: &default_group_class[depth - 1]#5/2 ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#6 ->&default_group_class[depth - 1]#6/2 FD: 138 BD: 6 +.+.: &default_group_class[depth - 1]#6 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 1 BD: 6 +.+.: &default_group_class[depth - 1]#6/2 FD: 1 BD: 1 +.+.: backend_mutex FD: 1 BD: 1 +.+.: scsi_mib_index_lock FD: 1 BD: 1 +.+.: hba_lock FD: 137 BD: 1 +.+.: device_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: &hba->device_lock FD: 335 BD: 1 +.+.: mtd_table_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#49 ->devtree_lock ->nvmem_ida.xa_lock ->nvmem_cell_mutex ->&k->k_lock ->&n->list_lock ->subsys mutex#50 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(console_sem).lock ->&cfs_rq->removed.lock ->pcpu_alloc_mutex ->cpu_hotplug_lock ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&q->mq_freeze_lock ->set->srcu ->percpu_ref_switch_lock ->&q->queue_lock ->&bdev->bd_size_lock ->elv_list_lock ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->&q->debugfs_mutex ->subsys mutex#37 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->subsys mutex#38 ->cgwb_lock ->bdi_lock ->inode_hash_lock FD: 1 BD: 1 +.+.: part_parser_lock FD: 1 BD: 87 ....: (kmod_concurrent_max).lock FD: 28 BD: 88 ....: &x->wait#17 ->&p->pi_lock FD: 1 BD: 154 ....: &prev->lock FD: 3 BD: 2 +.+.: subsys mutex#49 ->&k->k_lock FD: 1 BD: 2 ....: nvmem_ida.xa_lock FD: 1 BD: 2 +.+.: nvmem_cell_mutex FD: 1 BD: 2 +.+.: subsys mutex#50 FD: 1 BD: 70 +.+.: &bond->stats_lock FD: 33 BD: 3610 ....: lweventlist_lock ->pool_lock#2 
->&dir->lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 832 BD: 2 +.+.: (linkwatch_work).work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3640 +.+.: rtnl_mutex.wait_lock FD: 858 BD: 1 +.+.: (wq_completion)gid-cache-wq ->(work_completion)(&ndev_work->work) ->(work_completion)(&work->work) ->&rq->__lock FD: 856 BD: 2 +.+.: (work_completion)(&ndev_work->work) ->devices_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->rcu_node_0 ->&base->lock FD: 3 BD: 79 ..-.: once_lock ->crngs.lock FD: 290 BD: 2 +.+.: (work_completion)(&w->work) ->cpu_hotplug_lock ->&obj_hash[i].lock FD: 27 BD: 72 ++++: (inet6addr_validator_chain).rwsem ->&rq->__lock FD: 27 BD: 69 ++++: (inetaddr_validator_chain).rwsem ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#51 ->&k->k_lock FD: 1 BD: 1 +.+.: mdio_board_lock FD: 1 BD: 1 +.+.: mode_list_lock FD: 1 BD: 69 +.+.: napi_hash_lock FD: 141 BD: 134 +.+.: xps_map_mutex ->fs_reclaim ->pool_lock#2 ->jump_label_mutex ->&obj_hash[i].lock ->krc.lock ->&rq->__lock FD: 1 BD: 2 +.+.: (work_completion)(&vi->config_work) FD: 1 BD: 1 +.+.: l3mdev_lock FD: 1 BD: 3 ....: sd_index_ida.xa_lock FD: 29 BD: 3 +.+.: subsys mutex#52 ->&rq->__lock ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#53 ->&k->k_lock FD: 2 BD: 1 +.+.: compressor_list_lock ->pool_lock#2 FD: 2 BD: 238 ....: sg_index_lock ->pool_lock#2 FD: 3 BD: 5 +.+.: subsys mutex#54 ->&k->k_lock FD: 1 BD: 4 ....: bsg_minor_ida.xa_lock FD: 3 BD: 4 +.+.: subsys mutex#55 ->&k->k_lock FD: 1 BD: 176 +.+.: &dd->lock FD: 28 BD: 4256 ..-.: &folio_wait_table[i] ->&p->pi_lock FD: 38 BD: 1 +.+.: (wq_completion)kblockd ->(work_completion)(&(&hctx->run_work)->work) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) FD: 1 BD: 5 ....: hwsim_netgroup_ida.xa_lock FD: 33 BD: 3639 +.-.: hwsim_radio_lock ->&c->lock ->pool_lock#2 ->&list->lock#16 ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->&zone->lock ->init_task.mems_allowed_seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 3 BD: 4 +.+.: subsys mutex#56 ->&k->k_lock FD: 333 BD: 72 +.+.: &rdev->wiphy.mtx ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#57 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->nl_table_lock ->&rq->__lock ->&cfs_rq->removed.lock ->nl_table_wait.lock ->reg_requests_lock ->stack_depot_init_mutex ->pcpu_alloc_mutex ->&local->iflist_mtx ->&xa->xa_lock#3 ->net_rwsem ->&x->wait#9 ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->&wdev->mtx ->&fq->lock ->&n->list_lock ->rlock-AF_NETLINK ->lweventlist_lock ->&pool->lock ->rcu_node_0 ->&data->mutex ->&base->lock ->&tn->lock ->failover_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&idev->mc_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&wdev->event_lock ->&rdev->mgmt_registrations_lock ->(work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) ->&local->key_mtx ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->mount_lock 
->&rdev->wiphy_work_lock ->(&dwork->timer) ->(work_completion)(&(&link->color_collision_detect_work)->work) ->&local->chanctx_mtx ->rtnl_mutex.wait_lock ->&p->pi_lock ->&lock->wait_lock ->&list->lock#15 ->lock#6 ->&____s->seqcount#2 ->quarantine_lock ->remove_cache_srcu ->&rcu_state.expedited_wq ->&local->mtx ->&list->lock#2 ->&data->lock ->stock_lock ->&sem->wait_lock ->&meta->lock ->kfence_freelist_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->&local->queue_stop_reason_lock ->&local->sta_mtx ->_xmit_ETHER ->(&local->dynamic_ps_timer) ->(work_completion)(&local->dynamic_ps_enable_work) ->(work_completion)(&sdata->recalc_smps) ->(work_completion)(&link->csa_finalize_work) ->(work_completion)(&link->color_change_finalize_work) ->(work_completion)(&(&link->dfs_cac_timer_work)->work) ->&local->filter_lock ->&rnp->exp_lock ->&rnp->exp_wq[3] ->(work_completion)(&ifmgd->monitor_work) ->(work_completion)(&(&ifmgd->tdls_peer_del_work)->work) FD: 3 BD: 73 +.+.: subsys mutex#57 ->&k->k_lock FD: 1 BD: 73 +.+.: reg_requests_lock FD: 27 BD: 86 +.+.: &local->iflist_mtx ->hrtimer_bases.lock ->&rq->__lock ->tk_core.seq.seqcount FD: 190 BD: 79 +.+.: &wdev->mtx ->&rdev->bss_lock ->&local->chanctx_mtx ->&rdev->wiphy_work_lock ->&rq->__lock ->&ifibss->incomplete_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&local->mtx ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hrtimer_bases.lock ->&obj_hash[i].lock ->&base->lock ->&wdev->event_lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->nl_table_lock ->nl_table_wait.lock ->&list->lock#2 ->&sta->lock ->&local->sta_mtx ->&____s->seqcount#2 ->remove_cache_srcu ->&zone->lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->lweventlist_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->krc.lock ->&list->lock#15 ->&data->lock ->(&ifibss->timer) ->&rnp->exp_lock ->rcu_state.exp_mutex ->&lock->wait_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&ifmgd->teardown_lock ->(&ifmgd->timer) ->&pgdat->kswapd_wait FD: 5 BD: 3638 +.-.: &fq->lock ->tk_core.seq.seqcount FD: 3 BD: 69 +.+.: subsys mutex#58 ->&k->k_lock FD: 2 BD: 70 +.+.: &sdata->sec_mtx ->&sec->lock FD: 1 BD: 75 ++..: &sec->lock FD: 1 BD: 69 +.+.: &local->iflist_mtx#2 FD: 137 BD: 1 +.+.: hwsim_phys_lock ->fs_reclaim ->pool_lock#2 FD: 137 BD: 1 +.+.: xdomain_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ioctl_mutex FD: 1 BD: 1 +.+.: address_handler_list_lock FD: 1 BD: 1 +.+.: card_mutex FD: 3 BD: 1 +.+.: subsys mutex#59 ->&k->k_lock FD: 28 BD: 1 ....: &x->wait#18 ->&p->pi_lock FD: 30 BD: 2 ..-.: &txlock ->&list->lock#3 ->&txwq FD: 1 BD: 3 ..-.: &list->lock#3 FD: 28 BD: 3 ..-.: &txwq ->&p->pi_lock FD: 2 BD: 1 ....: &iocq[i].lock ->&ktiowq[i] FD: 1 BD: 2 ....: &ktiowq[i] FD: 1 BD: 1 ....: rcu_read_lock_bh FD: 3 BD: 3660 +.-.: noop_qdisc.q.lock ->crngs.lock FD: 3 BD: 3 +.+.: subsys mutex#60 ->&k->k_lock FD: 226 BD: 1 +.+.: usb_bus_idr_lock ->(usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&dev->power.lock ->device_links_srcu ->&zone->lock ->&____s->seqcount ->&c->lock ->(console_sem).lock ->input_pool.lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&cfs_rq->removed.lock ->device_state_lock 
->&dum_hcd->dum->lock ->subsys mutex#61 ->&x->wait#9 ->&lock->wait_lock ->&hub->irq_urb_lock ->(&hub->irq_urb_retry) ->&base->lock ->hcd_urb_unlink_lock ->(work_completion)(&hub->tt.clear_work) ->hcd_urb_list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&vhci_hcd->vhci->lock ->&meta->lock ->quarantine_lock FD: 30 BD: 1 ..-.: net/core/link_watch.c:31 FD: 174 BD: 1 +.+.: table_lock ->&k->list_lock ->fs_reclaim ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->running_helpers_waitq.lock ->(console_sem).lock ->&c->lock FD: 1 BD: 3 +.+.: mon_lock FD: 172 BD: 2 +.+.: usb_port_peer_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->dev_pm_qos_mtx ->component_mutex ->device_links_srcu ->dev_pm_qos_sysfs_mtx ->&rq->__lock ->&zone->lock ->sysfs_symlink_target_lock FD: 1 BD: 4 ....: device_state_lock FD: 30 BD: 8 ....: hcd_root_hub_lock ->hcd_urb_list_lock ->&bh->lock ->&p->pi_lock FD: 1 BD: 9 ....: hcd_urb_list_lock FD: 1 BD: 9 ..-.: &bh->lock FD: 4 BD: 82 ..-.: lock#6 ->kcov_remote_lock ->&kcov->lock FD: 2 BD: 147 ..-.: kcov_remote_lock ->pool_lock#2 FD: 28 BD: 8 ..-.: &x->wait#19 ->&p->pi_lock FD: 32 BD: 1 ..-.: drivers/block/floppy.c:640 FD: 37 BD: 1 +.+.: (fd_timeout).work ->&obj_hash[i].lock ->floppy_work ->dma_spin_lock ->floppy_lock ->command_done.lock FD: 1 BD: 2 +.+.: set_config_lock FD: 143 BD: 2 +.+.: hcd->bandwidth_mutex ->devtree_lock ->&obj_hash[i].lock ->&x->wait#9 ->&dev->power.lock ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->&x->wait#19 ->&c->lock FD: 1 BD: 2 +.+.: &new_driver->dynids.lock FD: 1 BD: 152 ....: irq_resend_lock FD: 1 BD: 83 +.+.: &ent->pde_unload_lock FD: 1 BD: 5 ....: &dum_hcd->dum->lock FD: 1 BD: 3 ....: fdc_wait.lock FD: 1 BD: 3 ....: (&motor_off_timer[drive]) FD: 1 BD: 299 ....: (&sq->pending_timer) FD: 1 BD: 5 +.+.: (work_completion)(&td->dispatch_work) FD: 23 BD: 7 +.+.: &q->blkcg_mutex ->&q->queue_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 141 BD: 4257 .+.+: &fsnotify_mark_srcu ->&conn->lock ->fs_reclaim ->pool_lock#2 ->&group->notification_lock ->&group->notification_waitq ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&cfs_rq->removed.lock ->remove_cache_srcu ->&____s->seqcount#2 ->&____s->seqcount ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 144 BD: 4 +.+.: &hub->status_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->hcd_root_hub_lock ->fs_reclaim ->&dum_hcd->dum->lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&base->lock ->&pool->lock ->(&timer.timer) ->&c->lock ->&zone->lock ->&____s->seqcount ->&vhci_hcd->vhci->lock FD: 1 BD: 3 +.+.: component_mutex FD: 146 BD: 2 +.+.: (work_completion)(&(&hub->init_work)->work) FD: 1 BD: 2 +.+.: subsys mutex#61 FD: 37 BD: 1 +.+.: (wq_completion)usb_hub_wq ->(work_completion)(&hub->events) FD: 36 BD: 2 +.+.: (work_completion)(&hub->events) ->lock#6 ->&dev->power.lock FD: 1 BD: 4 ....: &hub->irq_urb_lock FD: 1 BD: 4 ....: (&hub->irq_urb_retry) FD: 1 BD: 4 ....: hcd_urb_unlink_lock FD: 28 BD: 3 ..-.: usb_kill_urb_queue.lock ->&p->pi_lock FD: 1 BD: 4 +.+.: (work_completion)(&hub->tt.clear_work) FD: 1 BD: 1 ..-.: 
percpu_ref_switch_waitq.lock FD: 1 BD: 8 +.+.: udc_lock FD: 3 BD: 1 +.+.: subsys mutex#62 ->&k->k_lock FD: 1 BD: 1 ....: gadget_id_numbers.xa_lock FD: 144 BD: 2 +.+.: (work_completion)(&gadget->work) ->&root->kernfs_rwsem ->kernfs_notify_lock FD: 30 BD: 155 ....: kernfs_notify_lock FD: 64 BD: 2 +.+.: kernfs_notify_work ->kernfs_notify_lock ->&root->kernfs_supers_rwsem FD: 62 BD: 7 ++++: &root->kernfs_supers_rwsem ->inode_hash_lock FD: 1 BD: 1 +.+.: subsys mutex#63 FD: 1 BD: 1 +.+.: func_lock FD: 1 BD: 1 +.+.: g_tf_lock FD: 1 BD: 7 ....: &vhci_hcd->vhci->lock FD: 21 BD: 1 +.-.: (&vblank->disable_timer) ->&dev->vbl_lock FD: 40 BD: 7 -.-.: i8042_lock ->(console_sem).lock ->&x->wait#20 FD: 1 BD: 8 -...: &x->wait#20 FD: 30 BD: 4 ....: serio_event_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 249 BD: 1 +.+.: (wq_completion)events_long ->serio_event_work ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&br->gc_work)->work) ->(work_completion)(&br->mcast_gc_work) ->(work_completion)(&(&ipvs->est_reload_work)->work) ->&rq->__lock FD: 224 BD: 2 +.+.: serio_event_work ->serio_mutex FD: 223 BD: 3 +.+.: serio_mutex ->serio_event_lock ->i8042_lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&device->physical_node_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->semaphore->lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#64 ->bus_type_sem FD: 1 BD: 4 +.+.: subsys mutex#64 FD: 2 BD: 7 ....: input_ida.xa_lock ->pool_lock#2 FD: 39 BD: 7 +.+.: &mousedev->mutex/1 ->&mousedev->mutex#2 FD: 210 BD: 4 +.+.: &serio->drv_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&serio->lock ->i8042_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->input_mutex ->i8042_lock ->psmouse_mutex FD: 35 BD: 7 -.-.: &serio->lock ->&ps2dev->wait ->&dev->power.lock ->&dev->event_lock#2 FD: 45 BD: 6 +.+.: i8042_mutex ->&serio->lock ->i8042_lock ->&ps2dev->wait ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->&cfs_rq->removed.lock ->pool_lock#2 FD: 28 BD: 8 -.-.: &ps2dev->wait ->&p->pi_lock FD: 1 BD: 1 ....: rtc_ida.xa_lock FD: 2 BD: 1 +.+.: &rtc->ops_lock ->rtc_lock FD: 1 BD: 2 ....: platform_devid_ida.xa_lock FD: 1 BD: 2 ....: rtcdev_lock FD: 184 BD: 7 +.+.: &led_cdev->led_access ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#65 ->leds_list_lock ->triggers_list_lock FD: 3 BD: 8 +.+.: subsys mutex#65 ->&k->k_lock FD: 150 BD: 19 +.+.: &led_cdev->trigger_lock ->fs_reclaim ->pool_lock#2 ->&trig->leddev_list_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 20 +.+.: &trig->leddev_list_lock FD: 1 BD: 22 -...: &dev->event_lock#2 FD: 209 BD: 5 +.+.: psmouse_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&x->wait#9 ->&obj_hash[i].lock ->&serio->lock ->i8042_mutex ->&k->list_lock ->gdp_mutex ->lock 
->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->&rq->__lock ->input_mutex FD: 137 BD: 1 +.+.: g_smscore_deviceslock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: cx231xx_devlist_mutex FD: 1 BD: 1 +.+.: em28xx_devlist_mutex FD: 1 BD: 1 ....: pvr2_context_sync_data.lock FD: 1 BD: 15 +.+.: i2c_dev_list_lock FD: 3 BD: 8 +.+.: subsys mutex#66 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#67 FD: 186 BD: 2 +.+.: dvbdev_register_lock ->(console_sem).lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->minor_rwsem ->&xa->xa_lock#10 ->&mdev->graph_mutex ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&c->lock ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#68 FD: 187 BD: 1 +.+.: frontend_mutex ->fs_reclaim ->pool_lock#2 ->(console_sem).lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->dvbdev_register_lock FD: 1 BD: 3 +.+.: minor_rwsem FD: 2 BD: 3 ....: &xa->xa_lock#10 ->pool_lock#2 FD: 137 BD: 4 +.+.: &mdev->graph_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 3 BD: 3 +.+.: subsys mutex#68 ->&k->k_lock FD: 1 BD: 1 ....: &dmxdev->lock FD: 1 BD: 1 +.+.: &dvbdemux->mutex FD: 1 BD: 1 +.+.: media_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#69 FD: 1 BD: 1 +.+.: videodev_lock FD: 3 BD: 1 +.+.: subsys mutex#70 ->&k->k_lock FD: 1 BD: 1 +.+.: vimc_sensor:393:(&vsensor->hdl)->_lock FD: 1 BD: 1 +.+.: &v4l2_dev->lock FD: 1 BD: 1 +.+.: vimc_debayer:578:(&vdebayer->hdl)->_lock FD: 1 BD: 1 +.+.: vimc_lens:61:(&vlens->hdl)->_lock FD: 147 BD: 1 +.+.: vivid_ctrls:1606:(hdl_user_gen)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&zone->lock ->&obj_hash[i].lock ->&rq->__lock FD: 138 BD: 1 +.+.: vivid_ctrls:1608:(hdl_user_vid)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&zone->lock FD: 141 BD: 1 +.+.: vivid_ctrls:1610:(hdl_user_aud)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 145 BD: 1 +.+.: vivid_ctrls:1612:(hdl_streaming)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 139 BD: 1 +.+.: vivid_ctrls:1614:(hdl_sdtv_cap)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->&c->lock ->&____s->seqcount FD: 139 BD: 1 +.+.: 
vivid_ctrls:1616:(hdl_loop_cap)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 1 +.+.: vivid_ctrls:1618:(hdl_fb)->_lock FD: 1 BD: 7 +.+.: vivid_ctrls:1620:(hdl_vid_cap)->_lock FD: 1 BD: 4 +.+.: vivid_ctrls:1622:(hdl_vid_out)->_lock FD: 1 BD: 5 +.+.: vivid_ctrls:1625:(hdl_vbi_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1627:(hdl_vbi_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1630:(hdl_radio_rx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1632:(hdl_radio_tx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1634:(hdl_sdr_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1636:(hdl_meta_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1638:(hdl_meta_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1640:(hdl_tch_cap)->_lock FD: 1 BD: 1 ....: &adap->kthread_waitq FD: 1 BD: 1 +.+.: &dev->cec_xfers_slock FD: 1 BD: 1 +.+.: cec_devnode_lock FD: 1 BD: 1 ....: &dev->kthread_waitq_cec FD: 1 BD: 1 +.+.: subsys mutex#71 FD: 6 BD: 1 +.+.: &adap->lock ->tk_core.seq.seqcount ->&adap->devnode.lock_fhs FD: 1 BD: 2 +.+.: &adap->devnode.lock_fhs FD: 1 BD: 1 ....: ptp_clocks_map.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#72 ->&k->k_lock FD: 1 BD: 1 +.+.: pers_lock FD: 1 BD: 1 +.+.: _lock FD: 1 BD: 3 +.+.: dm_bufio_clients_lock FD: 1 BD: 1 +.+.: _ps_lock FD: 1 BD: 1 +.+.: _lock#2 FD: 1 BD: 1 +.+.: _lock#3 FD: 1 BD: 1 +.+.: register_lock#2 FD: 3 BD: 1 +.+.: subsys mutex#73 ->&k->k_lock FD: 1 BD: 1 .+.+: bp_lock FD: 3 BD: 1 +.+.: subsys mutex#74 ->&k->k_lock FD: 16 BD: 1 +.-.: (&dsp_spl_tl) ->dsp_lock FD: 15 BD: 2 ..-.: dsp_lock ->iclock_lock ->&obj_hash[i].lock ->&base->lock FD: 5 BD: 3 ...-: iclock_lock ->tk_core.seq.seqcount FD: 144 BD: 75 +.+.: lock#7 ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#12 ->&rq->__lock ->crngs.lock ->&xa->xa_lock#19 ->&id_priv->qp_mutex ->&id_priv->lock ->&xa->xa_lock#20 ->&cm_id_priv->lock ->&c->lock FD: 1 BD: 1 +.+.: intf_mutex FD: 1 BD: 1 ....: iscsi_transport_lock FD: 3 BD: 1 +.+.: subsys mutex#75 ->&k->k_lock FD: 859 BD: 2 ++++: link_ops_rwsem ->fs_reclaim ->pool_lock#2 ->&c->lock ->&x->wait#9 ->&rq->__lock ->&obj_hash[i].lock ->(console_sem).lock ->&n->list_lock ->&pdata->netdev_lock ->ndev_hash_lock ->crypto_alg_sem ->devices_rwsem ->&rxe->usdev_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&device->cache_lock ->rdmacg_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#84 ->&____s->seqcount#2 ->&____s->seqcount ->&zone->lock ->rcu_node_0 ->uevent_sock_mutex FD: 1 BD: 1 ....: &tx_task->waiting FD: 3 BD: 1 +.+.: subsys mutex#76 ->&k->k_lock FD: 1 BD: 1 +.+.: service_lock FD: 1 BD: 1 +.+.: vsock_register_mutex FD: 1 BD: 1 +.+.: comedi_drivers_list_lock FD: 3 BD: 6 +.+.: subsys mutex#77 ->&k->k_lock FD: 168 BD: 2 ++++: snd_ctl_layer_rwsem ->snd_ctl_led_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 3 +.+.: snd_card_mutex FD: 1 BD: 1 +.+.: snd_ioctl_rwsem FD: 137 BD: 2 +.+.: strings ->fs_reclaim ->pool_lock#2 FD: 1 BD: 2 +.+.: register_mutex FD: 182 BD: 3 +.+.: sound_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 
->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#77 ->&zone->lock ->&k->k_lock FD: 192 BD: 1 +.+.: register_mutex#2 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->&obj_hash[i].lock ->register_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->sound_oss_mutex ->strings ->&entry->access ->info_mutex FD: 184 BD: 1 +.+.: register_mutex#3 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->&c->lock ->&____s->seqcount ->clients_lock ->&zone->lock FD: 1 BD: 5 ....: clients_lock FD: 2 BD: 1 +.+.: &client->ports_mutex ->&client->ports_lock FD: 1 BD: 5 .+.+: &client->ports_lock FD: 185 BD: 1 +.+.: register_mutex#4 ->fs_reclaim ->pool_lock#2 ->sound_oss_mutex ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 184 BD: 3 +.+.: sound_oss_mutex ->fs_reclaim ->pool_lock#2 ->sound_loader_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#77 ->&cfs_rq->removed.lock ->&zone->lock ->&k->k_lock FD: 1 BD: 4 +.+.: sound_loader_lock FD: 140 BD: 1 .+.+: &grp->list_mutex/1 ->clients_lock ->&client->ports_lock ->register_lock#3 ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: &grp->list_mutex#2 ->&grp->list_lock FD: 1 BD: 2 ....: &grp->list_lock FD: 147 BD: 2 +.+.: async_lookup_work ->fs_reclaim ->pool_lock#2 ->clients_lock ->&client->ports_lock ->snd_card_mutex ->(kmod_concurrent_max).lock ->&obj_hash[i].lock ->&x->wait#17 ->&pool->lock ->&rq->__lock ->running_helpers_waitq.lock ->autoload_work ->&x->wait#10 FD: 1 BD: 2 ....: register_lock#3 FD: 4 BD: 3 +.+.: autoload_work ->&k->list_lock ->&k->k_lock FD: 171 BD: 1 ++++: &card->controls_rwsem ->&xa->xa_lock#11 ->fs_reclaim ->&card->ctl_files_rwlock ->snd_ctl_layer_rwsem ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 9 BD: 2 +.+.: &xa->xa_lock#11 ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&zone->lock FD: 1 BD: 2 ....: &card->ctl_files_rwlock FD: 1 BD: 3 +.+.: snd_ctl_led_mutex FD: 1 BD: 1 +.+.: register_mutex#5 FD: 1 BD: 73 +.+.: failover_lock FD: 9 BD: 5 +...: llc_sap_list_lock ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 FD: 137 BD: 1 +.+.: act_id_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 69 ++++: act_mod_lock FD: 1 BD: 69 ++++: ife_mod_lock FD: 1 BD: 69 +.+.: nf_connlabels_lock FD: 1 BD: 69 ++++: cls_mod_lock FD: 1 BD: 1 +.+.: ematch_mod_lock FD: 450 BD: 4 +.+.: sock_diag_table_mutex ->nlk_cb_mutex-SOCK_DIAG ->pool_lock#2 ->&obj_hash[i].lock ->rlock-AF_NETLINK ->inet_diag_table_mutex ->(kmod_concurrent_max).lock ->fs_reclaim ->&rq->__lock ->&x->wait#17 ->running_helpers_waitq.lock FD: 1 BD: 1 +.+.: nfnl_subsys_acct FD: 1 BD: 1 +.+.: nfnl_subsys_queue FD: 1 BD: 1 +.+.: nfnl_subsys_ulog FD: 27 BD: 5 +.+.: nf_log_mutex ->&rq->__lock FD: 1 BD: 1 +.+.: nfnl_subsys_osf FD: 31 BD: 4 +.+.: nf_sockopt_mutex ->&rq->__lock ->nf_sockopt_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 148 BD: 2 +.+.: nfnl_subsys_ctnetlink ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->rlock-AF_NETLINK ->&cfs_rq->removed.lock ->nlk_cb_mutex-NETFILTER ->&lock->wait_lock ->fs_reclaim ->(console_sem).lock 
FD: 40 BD: 1 +.+.: nfnl_subsys_ctnetlink_exp ->(console_sem).lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 5 +.+.: nf_ct_ecache_mutex FD: 1 BD: 1 +.+.: nfnl_subsys_cttimeout FD: 1 BD: 1 +.+.: nfnl_subsys_cthelper FD: 1 BD: 1 +.+.: nf_ct_helper_mutex FD: 1 BD: 1 +...: nf_conntrack_expect_lock FD: 36 BD: 7 +.+.: nf_conntrack_mutex ->&nf_conntrack_locks[i] ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->nf_conntrack_mutex.wait_lock ->&pool->lock ->&____s->seqcount#7 ->&nf_conntrack_locks[i]/1 ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: nf_ct_nat_helpers_mutex FD: 309 BD: 1 +.+.: nfnl_subsys_nftables ->&nft_net->commit_mutex ->&rq->__lock ->&lock->wait_lock FD: 1 BD: 1 +.+.: nfnl_subsys_nftcompat FD: 980 BD: 1 +.+.: masq_mutex ->pernet_ops_rwsem ->(inetaddr_chain).rwsem ->inet6addr_chain.lock FD: 237 BD: 5 +.+.: &xt[i].mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&mm->mmap_lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->&per_cpu(xt_recseq, i) ->&obj_hash[i].lock ->purge_vmap_area_lock ->&rq->__lock ->&____s->seqcount#2 ->init_mm.page_table_lock ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->rcu_node_0 ->&zone->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->quarantine_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&base->lock FD: 30 BD: 3647 +.+.: &tn->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 3 BD: 1 +.+.: subsys mutex#78 ->&k->k_lock FD: 151 BD: 5 +.+.: nfnl_subsys_ipset ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->rcu_state.barrier_mutex ->ip_set_ref_lock ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->rcu_node_0 ->&zone->lock ->&lock->wait_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->stock_lock ->crngs.lock ->&cfs_rq->removed.lock ->purge_vmap_area_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: ip_set_type_mutex FD: 144 BD: 72 +.+.: ipvs->est_mutex ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&n->list_lock ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->pcpu_lock ->&obj_hash[i].lock ->quarantine_lock ->&____s->seqcount#2 ->&____s->seqcount ->kthread_create_lock ->&x->wait ->&pool->lock ->(console_sem).lock FD: 1 BD: 70 +.+.: ip_vs_sched_mutex FD: 137 BD: 5 +.+.: __ip_vs_app_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&rq->__lock ->&____s->seqcount#2 FD: 1 BD: 1 +.+.: ip_vs_pe_mutex FD: 1 BD: 1 +.+.: tunnel4_mutex FD: 1 BD: 1 +.+.: xfrm4_protocol_mutex FD: 443 BD: 6 +.+.: inet_diag_table_mutex ->&h->lhash2[i].lock ->&rq->__lock ->&hashinfo->ehash_locks[i] ->sk_lock-AF_INET ->slock-AF_INET ->rcu_node_0 FD: 1 BD: 1 +...: xfrm_km_lock FD: 1 BD: 1 +...: xfrm_translator_lock FD: 1 BD: 1 +.+.: xfrm6_protocol_mutex FD: 1 BD: 1 +.+.: tunnel6_mutex FD: 1 BD: 1 +.+.: xfrm_if_cb_lock FD: 1 BD: 1 +...: inetsw6_lock FD: 1 BD: 7 +.+.: &hashinfo->lock#2 FD: 19 BD: 5 +.+.: &net->ipv6.ip6addrlbl_table.lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 220 BD: 3587 +.+.: &idev->mc_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&dev_addr_list_lock_key ->_xmit_ETHER ->&c->lock ->&zone->lock ->&____s->seqcount 
->batched_entropy_u32.lock ->&base->lock ->krc.lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&bridge_netdev_addr_lock_key ->&dev_addr_list_lock_key#2 ->&batadv_netdev_addr_lock_key ->&rq->__lock ->&vlan_netdev_addr_lock_key ->&macvlan_netdev_addr_lock_key ->&dev_addr_list_lock_key#3 ->&bridge_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#2/1 ->&pool->lock ->_xmit_ETHER/1 ->rcu_node_0 ->&batadv_netdev_addr_lock_key/1 ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#3/1 ->&macsec_netdev_addr_lock_key/1 ->&____s->seqcount#2 ->&lock->wait_lock ->_xmit_IPGRE ->&macvlan_netdev_addr_lock_key/2 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&dev_addr_list_lock_key#3/2 ->&macvlan_netdev_addr_lock_key/3 FD: 19 BD: 3589 +...: &dev_addr_list_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&____s->seqcount#2 ->&zone->lock FD: 43 BD: 3603 +...: _xmit_ETHER ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&local->filter_lock ->(console_sem).lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 834 BD: 1 +.+.: (wq_completion)ipv6_addrconf ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->(work_completion)(&(&ifa->dad_work)->work) ->&rq->__lock FD: 832 BD: 6 +.+.: (work_completion)(&(&net->ipv6.addr_chk_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 28 BD: 82 ....: &x->wait#21 ->&p->pi_lock FD: 57 BD: 3697 ++--: &ndev->lock ->&ifa->lock ->pool_lock#2 ->&dir->lock#2 ->pcpu_lock ->&tb->tb6_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->batched_entropy_u32.lock ->&base->lock ->&____s->seqcount#2 ->&n->list_lock FD: 10 BD: 1 +.+.: stp_proto_mutex ->llc_sap_list_lock FD: 1 BD: 1 ....: switchdev_notif_chain.lock FD: 27 BD: 69 ++++: (switchdev_blocking_notif_chain).rwsem ->&rq->__lock FD: 834 BD: 1 +.+.: br_ioctl_mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&dir->lock#2 ->netdev_unregistering_wq.lock ->&rq->__lock ->rcu_state.barrier_mutex.wait_lock ->&cfs_rq->removed.lock ->br_ioctl_mutex.wait_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 292 BD: 72 +.+.: nf_ct_proto_mutex ->defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->defrag6_mutex ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 236 BD: 8 +.+.: ebt_mutex ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->ebt_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: dsa_tag_drivers_lock FD: 1 BD: 1 +...: protocol_list_lock FD: 1 BD: 1 +...: linkfail_lock FD: 1 BD: 1 +...: rose_neigh_list_lock FD: 1 BD: 1 +.+.: proto_tab_lock#2 FD: 1 BD: 20 ++++: chan_list_lock FD: 1 BD: 4 +.+.: l2cap_sk_list.lock FD: 242 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->chan_list_lock ->&ei->socket.wq.wait ->&mm->mmap_lock ->&rq->__lock ->(console_sem).lock FD: 29 BD: 21 +...: slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&sk->sk_lock.wq#2 FD: 1 BD: 1 ....: rfcomm_wq.lock FD: 1 BD: 1 +.+.: rfcomm_mutex FD: 1 BD: 1 +.+.: auth_domain_lock FD: 1 BD: 1 +.+.: registered_mechs_lock FD: 1 BD: 1 ....: atm_dev_notify_chain.lock FD: 1 BD: 1 +.+.: proto_tab_lock#3 FD: 834 
BD: 1 +.+.: vlan_ioctl_mutex ->&mm->mmap_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->vlan_ioctl_mutex.wait_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_state.barrier_mutex ->rcu_state.barrier_mutex.wait_lock ->dev_base_lock ->lweventlist_lock ->pcpu_lock ->&dir->lock#2 ->krc.lock ->netdev_unregistering_wq.lock ->&____s->seqcount ->stock_lock FD: 1 BD: 1 +.+.: rds_info_lock FD: 148 BD: 8 ++++: rds_trans_sem ->(console_sem).lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->crngs.lock ->&id_priv->handler_mutex ->id_table_lock ->&x->wait#27 ->&obj_hash[i].lock ->&id_priv->lock ->lock#7 FD: 1 BD: 77 ....: &id_priv->lock FD: 2 BD: 76 +.+.: &xa->xa_lock#12 ->pool_lock#2 FD: 208 BD: 84 +.+.: k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&table->hash[i].lock ->k-clock-AF_INET6 ->&queue->rskq_lock ->&obj_hash[i].lock ->&rq->__lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->fs_reclaim ->&c->lock ->pool_lock#2 ->rcu_node_0 ->&base->lock ->&hashinfo->ehash_locks[i] ->slock-AF_INET6 ->clock-AF_INET6 ->&n->list_lock ->&dir->lock ->&____s->seqcount ->&rcu_state.expedited_wq ->remove_cache_srcu ->&cfs_rq->removed.lock ->&data->lock FD: 87 BD: 88 +.-.: k-slock-AF_INET6 ->&c->lock ->pool_lock#2 ->tk_core.seq.seqcount ->&obj_hash[i].lock ->crngs.lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET6 ->krc.lock ->&hashinfo->ehash_locks[i] ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->k-clock-AF_INET6 ->quarantine_lock ->key#24 ->&(&bp->lock)->lock FD: 32 BD: 124 ++.-: k-clock-AF_INET6 FD: 25 BD: 120 +.-.: &tcp_hashinfo.bhash[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&tcp_hashinfo.bhash2[i].lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->stock_lock ->&obj_hash[i].lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->k-clock-AF_INET FD: 23 BD: 121 +.-.: &tcp_hashinfo.bhash2[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] ->stock_lock ->&n->list_lock ->k-clock-AF_INET ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&____s->seqcount#2 FD: 19 BD: 97 +.+.: &h->lhash2[i].lock ->clock-AF_INET6 ->reuseport_lock ->k-clock-AF_INET6 FD: 1 BD: 5 +...: &list->lock#4 FD: 1 BD: 9 +...: k-clock-AF_TIPC FD: 41 BD: 8 +.+.: k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&tn->nametbl_lock ->&obj_hash[i].lock ->k-clock-AF_TIPC ->&rq->__lock FD: 17 BD: 9 +...: k-slock-AF_TIPC ->&list->lock#38 ->&data->lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock FD: 21 BD: 9 +...: &tn->nametbl_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&service->lock ->&nt->cluster_scope_lock ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&____s->seqcount#2 FD: 19 BD: 13 +...: &service->lock ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&____s->seqcount#2 FD: 27 BD: 75 +.+.: &pnettable->lock ->&rq->__lock FD: 27 BD: 75 +.+.: smc_ib_devices.mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: smc_wr_rx_hash_lock FD: 1 BD: 1 +.+.: v9fs_trans_lock FD: 1 BD: 5 +...: &this->receive_lock FD: 1 BD: 1 +...: lowpan_nhc_lock FD: 834 BD: 8 +.+.: ovs_mutex ->(work_completion)(&data->gc_work) ->nf_ct_proto_mutex 
->&obj_hash[i].lock ->pool_lock#2 ->nf_connlabels_lock ->net_rwsem ->&rq->__lock ->quarantine_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pcpu_alloc_mutex ->stock_lock ->stack_depot_init_mutex ->crngs.lock ->rtnl_mutex ->ovs_mutex.wait_lock ->&pool->lock ->pcpu_lock ->krc.lock ->&dir->lock#2 ->(console_sem).lock ->&cfs_rq->removed.lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 290 BD: 73 +.+.: defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 290 BD: 73 +.+.: defrag6_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 136 +.+.: subsys mutex#79 FD: 30 BD: 1 ..-.: &(&gc_work->dwork)->timer FD: 37 BD: 2 +.+.: (work_completion)(&(&gc_work->dwork)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->pool_lock#2 ->&base->lock FD: 1 BD: 3681 ...-: &____s->seqcount#7 FD: 30 BD: 1 ..-.: &(&ipvs->defense_work)->timer FD: 34 BD: 6 +.+.: (work_completion)(&(&ipvs->defense_work)->work) ->&s->s_inode_list_lock ->&ipvs->dropentry_lock ->&ipvs->droppacket_lock ->&ipvs->securetcp_lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq FD: 1 BD: 7 +...: &ipvs->dropentry_lock FD: 1 BD: 7 +...: &ipvs->droppacket_lock FD: 1 BD: 7 +...: &ipvs->securetcp_lock FD: 30 BD: 1 ..-.: lib/debugobjects.c:101 FD: 33 BD: 2 +.+.: (debug_obj_work).work ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->rcu_node_0 ->quarantine_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 14 BD: 5 +.-.: (&net->can.stattimer) ->&obj_hash[i].lock ->&base->lock FD: 37 BD: 2 +.+.: drain_vmap_work ->vmap_purge_lock FD: 12 BD: 239 +...: map_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 1 ....: rcu_read_lock_sched FD: 13 BD: 238 +.-.: prog_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 1 BD: 76 +.-.: bpf_lock FD: 1 BD: 1 ....: rcu_read_lock_trace FD: 10 BD: 238 +...: btf_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 1 BD: 1 +.+.: &map->freeze_mutex FD: 1 BD: 6 +.+.: ima_keys_lock FD: 140 BD: 135 +.+.: scomp_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&c->lock FD: 28 BD: 1 +.+.: pcpu_drain_mutex ->&pcp->lock ->&rq->__lock FD: 30 BD: 1 ..-.: &(&ovs_net->masks_rebalance)->timer FD: 835 BD: 6 +.+.: (work_completion)(&(&ovs_net->masks_rebalance)->work) ->ovs_mutex ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 456 BD: 5 +.+.: k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->local_mutex ->&local->services_lock ->fs_reclaim ->pool_lock#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&c->lock ->&rx->incoming_lock ->&obj_hash[i].lock ->&____s->seqcount ->&rxnet->conn_lock ->&call->waitq ->(rxrpc_call_limiter).lock ->&rx->recvmsg_lock ->&rx->call_lock ->&rxnet->call_lock ->(&call->timer) ->&base->lock ->&list->lock#23 ->&n->list_lock ->&rq->__lock ->&meta->lock ->quarantine_lock ->&____s->seqcount#2 FD: 1 BD: 6 +...: k-slock-AF_RXRPC FD: 443 BD: 7 +.+.: &rxnet->local_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->crngs.lock ->mmu_notifier_invalidate_range_start 
->&sb->s_type->i_lock_key#8 ->&____s->seqcount ->&c->lock ->&dir->lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->cpu_hotplug_lock ->&rq->__lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&x->wait#22 ->&n->list_lock ->stock_lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->rcu_node_0 ->&table->hash[i].lock ->k-clock-AF_INET6 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&____s->seqcount#2 ->k-sk_lock-AF_INET ->k-slock-AF_INET FD: 20 BD: 95 +...: &table->hash[i].lock ->k-clock-AF_INET6 ->&table->hash2[i].lock ->k-clock-AF_INET ->clock-AF_INET ->clock-AF_INET6 FD: 1 BD: 96 +...: &table->hash2[i].lock FD: 290 BD: 2 +.+.: netstamp_work ->cpu_hotplug_lock FD: 28 BD: 8 ....: &x->wait#22 ->&p->pi_lock FD: 1 BD: 6 +.+.: &local->services_lock FD: 1 BD: 10 +.+.: &rxnet->conn_lock FD: 1 BD: 6 ....: &call->waitq FD: 1 BD: 6 +.+.: &rx->call_lock FD: 1 BD: 6 +.+.: &rxnet->call_lock FD: 32 BD: 5 +.-.: (&rxnet->peer_keepalive_timer) FD: 145 BD: 1 +.+.: init_user_ns.keyring_sem ->key_user_lock ->root_key_user.lock ->fs_reclaim ->pool_lock#2 ->crngs.lock ->key_serial_lock ->key_construction_mutex ->&type->lock_class ->keyring_serialise_link_lock FD: 1 BD: 5 +.+.: root_key_user.lock FD: 18 BD: 7 +.+.: (wq_completion)krxrpcd ->(work_completion)(&rxnet->peer_keepalive_work) ->(work_completion)(&rxnet->service_conn_reaper) FD: 15 BD: 8 +.+.: (work_completion)(&rxnet->peer_keepalive_work) ->&rxnet->peer_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 9 +.+.: &rxnet->peer_hash_lock FD: 1 BD: 6 +.+.: keyring_name_lock FD: 1 BD: 1 +.+.: template_list FD: 1 BD: 1 +.+.: idr_lock FD: 137 BD: 9 +.+.: ima_extend_list_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->&rq->__lock ->rcu_node_0 ->remove_cache_srcu ->ima_extend_list_mutex.wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 1 +.+.: clk_debug_lock FD: 28 BD: 4 +.+.: deferred_probe_work ->deferred_probe_mutex FD: 27 BD: 69 +.+.: &(&net->nexthop.notifier_chain)->rwsem ->&rq->__lock FD: 296 BD: 83 +.+.: k-sk_lock-AF_INET ->k-slock-AF_INET ->&table->hash[i].lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->&tcp_hashinfo.bhash[i].lock ->&rq->__lock ->k-clock-AF_INET ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->batched_entropy_u16.lock ->&mm->mmap_lock ->&cfs_rq->removed.lock ->tk_core.seq.seqcount ->fs_reclaim ->&base->lock ->&hashinfo->ehash_locks[i] ->slock-AF_INET ->&in_dev->mc_tomb_lock ->&im->lock ->&c->lock ->&n->list_lock ->stock_lock ->&data->lock ->&h->lhash2[i].lock FD: 44 BD: 85 +...: k-slock-AF_INET ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&c->lock ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->krc.lock ->key#24 FD: 1 BD: 123 ++..: k-clock-AF_INET FD: 832 BD: 2 +.+.: reg_work ->rtnl_mutex FD: 1 BD: 69 +...: reg_pending_beacons_lock FD: 846 BD: 2 +.+.: (work_completion)(&fw_work->work) ->fs_reclaim ->pool_lock#2 ->&fw_cache.lock ->tk_core.seq.seqcount ->async_lock ->init_task.alloc_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&zone->lock ->&____s->seqcount ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->umhelper_sem ->fw_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 2 BD: 3 +.+.: &fw_cache.lock ->pool_lock#2 FD: 1 BD: 1 +.+.: detector_work FD: 1 BD: 1 +.+.: acpi_gpio_deferred_req_irqs_lock FD: 30 BD: 1 ..-.: fs/file_table.c:431 FD: 1 BD: 1 +.+.: prepare_lock FD: 27 
BD: 2 +.+.: (delayed_fput_work).work ->&obj_hash[i].lock ->&rq->__lock ->pool_lock#2 FD: 3 BD: 4 +.+.: subsys mutex#80 ->&k->k_lock FD: 2 BD: 11 +.+.: fw_lock ->&x->wait#23 FD: 1 BD: 12 ....: &x->wait#23 FD: 1 BD: 1 +.+.: cdev_lock FD: 364 BD: 2 +.+.: &tty->legacy_mutex ->&tty->read_wait ->&tty->write_wait ->&tty->ldisc_sem ->&tty->files_lock ->&port->lock ->&port->mutex ->&port_lock_key ->tasklist_lock ->&tty->ctrl.lock ->&f->f_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 7 ....: &tty->read_wait FD: 28 BD: 4290 -.-.: &tty->write_wait ->&p->pi_lock FD: 348 BD: 3 ++++: &tty->ldisc_sem ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&tty->write_wait ->&tty->read_wait ->&tty->termios_rwsem ->&mm->mmap_lock ->&port_lock_key ->&port->lock ->&tty->flow.lock ->&ldata->atomic_read_lock FD: 263 BD: 6 ++++: &tty->termios_rwsem ->&port->mutex ->&tty->write_wait ->&tty->read_wait ->&ldata->output_lock ->&port_lock_key FD: 1 BD: 5 +.+.: &tty->files_lock FD: 1 BD: 4290 -.-.: &port->lock FD: 137 BD: 10 +.+.: hash_mutex ->fs_reclaim ->pool_lock#2 FD: 36 BD: 10 -.-.: &i->lock ->&port_lock_key FD: 1 BD: 1 +.+.: detected_devices_mutex FD: 28 BD: 4326 ....: &wq#2 ->&p->pi_lock FD: 394 BD: 1 +.+.: &bdev->bd_fsfreeze_mutex ->sb_lock ->fs_reclaim ->pool_lock#2 ->&type->s_umount_key#24/1 ->&c->lock ->&____s->seqcount ->&type->s_umount_key#25/1 ->&type->s_umount_key#26/1 ->&type->s_umount_key#27/1 FD: 158 BD: 2 +.+.: &type->s_umount_key#24/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->&wq->mutex ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->wq_pool_mutex ->mmu_notifier_invalidate_range_start ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->bit_wait_table + i ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->wq_mayday_lock ->&cfs_rq->removed.lock ->&sbi->old_work_lock ->(work_completion)(&(&sbi->old_work)->work) FD: 28 BD: 4284 ..-.: bit_wait_table + i ->&p->pi_lock FD: 31 BD: 2 +.+.: (work_completion)(&s->destroy_work) ->&rsp->gp_wait ->pcpu_lock ->&obj_hash[i].lock FD: 1 BD: 3 +.+.: &sbi->old_work_lock FD: 1 BD: 3 +.+.: (work_completion)(&(&sbi->old_work)->work) FD: 12 BD: 4288 ....: &xa->xa_lock#13 ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock FD: 156 BD: 2 +.+.: &type->s_umount_key#25/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->lock#3 FD: 12 BD: 6 +.+.: (work_completion)(work) ->lock#4 ->lock#5 FD: 156 BD: 2 +.+.: &type->s_umount_key#26/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->lock#3 FD: 384 BD: 2 +.+.: &type->s_umount_key#27/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock 
->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&c->lock ->pool_lock#2 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->percpu_counters_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&journal->j_state_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&journal->j_wait_done_commit ->&p->alloc_lock ->cpu_hotplug_lock ->wq_pool_mutex ->&ei->i_es_lock ->ext4_grpinfo_slab_create_mutex ->&s->s_inode_list_lock ->ext4_li_mtx ->lock ->&root->kernfs_rwsem ->(console_sem).lock ->&dentry->d_lock FD: 21 BD: 158 +.+.: &bgl->locks[i].lock ->&sbi->s_md_lock ->&obj_hash[i].lock ->pool_lock#2 ->&ei->i_prealloc_lock ->quarantine_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&pa->pa_lock#2 ->&meta->lock ->kfence_freelist_lock ->&base->lock FD: 55 BD: 4271 +.+.: &sb->s_type->i_lock_key#22 ->&dentry->d_lock ->&lru->node[i].lock ->&xa->xa_lock#7 ->bit_wait_table + i FD: 28 BD: 151 ..-.: &rsp->gp_wait ->&obj_hash[i].lock ->pool_lock#2 ->&p->pi_lock FD: 246 BD: 7 ++++: &sb->s_type->i_mutex_key#8 ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->integrity_iint_lock ->&rq->__lock ->&sem->wait_lock ->remove_cache_srcu ->tk_core.seq.seqcount ->&ei->xattr_sem ->fs_reclaim ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&meta->lock ->mapping.invalidate_lock ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->swap_cgroup_mutex ->&base->lock ->&fq->mq_flush_lock ->&x->wait#26 ->(&timer.timer) ->swapon_mutex ->proc_poll_wait.lock ->&dentry->d_lock ->&____s->seqcount#2 ->stock_lock ->&n->list_lock ->&p->pi_lock ->&mm->mmap_lock ->ima_extend_list_mutex ->&p->alloc_lock ->&list->lock ->kauditd_wait.lock ->&sbi->s_writepages_rwsem ->&folio_wait_table[i] ->lock#5 ->&journal->j_wait_transaction_locked ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&dd->lock ->&rcu_state.expedited_wq ->&sem->waiters ->&rsp->gp_wait ->pool_lock ->&lruvec->lru_lock ->key#3 ->key#14 ->&sb->s_type->i_mutex_key#8/4 ->&mapping->i_mmap_rwsem ->&journal->j_list_lock ->bit_wait_table + i ->batched_entropy_u32.lock ->(console_sem).lock ->console_owner_lock ->console_owner FD: 46 BD: 4256 ++++: &ei->i_es_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&sbi->s_es_lock ->&zone->lock ->&obj_hash[i].lock ->key#2 ->key#6 ->key#7 ->key#8 ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->&n->list_lock ->quarantine_lock ->(console_sem).lock ->console_owner_lock ->console_owner FD: 158 BD: 157 ++++: &ei->i_data_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ei->i_es_lock ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&ei->i_prealloc_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sb->s_type->i_lock_key#22 ->&(ei->i_block_reservation_lock) ->&rq->__lock ->&ei->i_raw_lock ->&wb->list_lock ->&mapping->private_lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->key#14 
->&sbi->s_md_lock ->key#3 ->&lg->lg_mutex ->bit_wait_table + i ->rcu_node_0 ->&wb->work_lock ->&n->list_lock ->&____s->seqcount#2 ->&pa->pa_lock#2 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->remove_cache_srcu ->quarantine_lock ->&journal->j_wait_updates ->stock_lock ->&xa->xa_lock#7 ->lock#4 ->&bgl->locks[i].lock ->&sem->wait_lock ->&dd->lock ->&journal->j_state_lock ->&base->lock ->&ei->i_data_sem/1 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&lock->wait_lock ->&p->pi_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->fs_reclaim FD: 1 BD: 4257 +.+.: &sbi->s_es_lock FD: 77 BD: 157 ++++: &journal->j_state_lock ->&journal->j_wait_done_commit ->&journal->j_wait_commit ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&journal->j_wait_updates ->&journal->j_wait_transaction_locked ->&journal->j_list_lock ->&journal->j_wait_reserved FD: 28 BD: 158 ....: &journal->j_wait_done_commit ->&p->pi_lock FD: 28 BD: 158 ....: &journal->j_wait_commit ->&p->pi_lock FD: 167 BD: 3 +.+.: ext4_grpinfo_slab_create_mutex ->slab_mutex FD: 141 BD: 4 +.+.: ext4_li_mtx ->fs_reclaim ->pool_lock#2 ->batched_entropy_u16.lock ->&eli->li_list_mtx ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock FD: 1 BD: 1 ....: &rs->lock FD: 194 BD: 5 ++++: &type->i_mutex_dir_key#3 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->namespace_sem ->&c->lock ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&n->list_lock ->rcu_node_0 ->&____s->seqcount#2 ->stock_lock ->remove_cache_srcu ->&journal->j_wait_transaction_locked ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&sem->wait_lock ->&meta->lock ->quarantine_lock ->&rcu_state.expedited_wq ->&rcu_state.gp_wq ->&base->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 42 BD: 75 +.+.: rcu_state.barrier_mutex ->rcu_state.barrier_lock ->&x->wait#24 ->&rq->__lock ->rcu_state.barrier_mutex.wait_lock ->&pool->lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 28 BD: 76 ..-.: &x->wait#24 ->&p->pi_lock FD: 1 BD: 1 +.+.: (init_mm).mmap_lock FD: 165 BD: 1 +.+.: &type->s_umount_key#28/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key#9 ->&dentry->d_lock FD: 41 BD: 4268 +.+.: &sb->s_type->i_lock_key#23 ->&dentry->d_lock ->bit_wait_table + i FD: 150 BD: 4 ++++: &sb->s_type->i_mutex_key#9 ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->rename_lock.seqcount ->proc_subdir_lock ->sysctl_lock ->&obj_hash[i].lock ->&p->alloc_lock ->&pid->lock ->namespace_sem ->tomoyo_ss ->&n->list_lock ->&rq->__lock ->remove_cache_srcu ->&xa->xa_lock#13 ->stock_lock ->&____s->seqcount#2 
->rcu_node_0 ->&rcu_state.gp_wq ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 239 BD: 2 .+.+: sb_writers#3 ->mount_lock ->&sb->s_type->i_mutex_key#9 ->sysctl_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&h->resize_lock ->hugetlb_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->&dentry->d_lock ->tomoyo_ss ->&mm->mmap_lock ->oom_adj_mutex ->remove_cache_srcu ->&p->pi_lock ->&rq->__lock ->&c->lock ->&____s->seqcount#11 ->&(&net->ipv4.ping_group_range.lock)->lock ->rcu_node_0 ->oom_adj_mutex.wait_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&sb->s_type->i_mutex_key#9/1 FD: 139 BD: 3 +.+.: &h->resize_lock ->free_hpage_work ->hugetlb_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4 +.+.: free_hpage_work FD: 2 BD: 141 ....: hugetlb_lock ->&____s->seqcount#2 FD: 199 BD: 143 ++++: mapping.invalidate_lock ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->&ei->i_es_lock ->&ei->i_data_sem ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_node_0 ->&mapping->i_mmap_rwsem ->&journal->j_state_lock ->jbd2_handle ->&base->lock ->&mapping->private_lock ->&sbi->s_writepages_rwsem ->&sem->waiters ->&rsp->gp_wait ->remove_cache_srcu ->stock_lock ->&sb->s_type->i_lock_key#22 ->lock#5 ->&lruvec->lru_lock ->&folio_wait_table[i] ->&sem->wait_lock ->&____s->seqcount#2 ->&journal->j_wait_transaction_locked ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&meta->lock ->&n->list_lock ->&wb->list_lock ->quarantine_lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&p->pi_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->fs_reclaim ->batched_entropy_u32.lock FD: 1 BD: 4251 ++++: integrity_iint_lock FD: 215 BD: 4 +.+.: &iint->mutex ->&ei->xattr_sem ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->ima_extend_list_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->mapping.invalidate_lock ->&folio_wait_table[i] ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->tk_core.seq.seqcount ->&meta->lock ->&lock->wait_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&n->list_lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->rcu_node_0 ->&p->alloc_lock ->&list->lock ->kauditd_wait.lock ->ima_extend_list_mutex.wait_lock ->&p->pi_lock ->&rcu_state.expedited_wq FD: 60 BD: 10 .+.+: &ei->xattr_sem ->&mapping->private_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->tk_core.seq.seqcount ->&dd->lock ->&c->lock ->bit_wait_table + i FD: 1 BD: 5 ++++: entries_lock FD: 249 BD: 2 +.+.: &sig->exec_update_lock ->&p->alloc_lock ->&sighand->siglock ->&newf->file_lock ->batched_entropy_u64.lock ->&mm->mmap_lock ->delayed_uprobe_lock ->&memcg->mm_list.lock ->pgd_lock ->pool_lock#2 ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->&rq->__lock ->pool_lock ->quarantine_lock ->&base->lock ->stock_lock ->&cfs_rq->removed.lock FD: 1 BD: 4251 +.+.: &memcg->mm_list.lock FD: 3 BD: 113 ..-.: batched_entropy_u16.lock ->crngs.lock FD: 28 BD: 4256 +.+.: ptlock_ptr(page)#2/1 FD: 136 BD: 1 ++++: &type->s_umount_key#29 ->shrinker_rwsem ->&dentry->d_lock ->rename_lock.seqcount ->&dentry->d_lock/1 
->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->&lru->node[i].lock ->&rq->__lock ->&pid->lock FD: 843 BD: 2 +.+.: (work_completion)(&map->work) ->&obj_hash[i].lock ->pool_lock#2 ->stock_lock ->dev_map_lock ->rcu_node_0 ->&rnp->exp_wq[0] ->&rq->__lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->rcu_state.barrier_mutex ->vmap_area_lock ->purge_vmap_area_lock ->pcpu_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&rnp->exp_wq[2] ->rcu_state.barrier_mutex.wait_lock ->percpu_counters_lock ->cgroup_mutex ->callchain_mutex ->quarantine_lock FD: 833 BD: 2 +.+.: (work_completion)(&aux->work) ->map_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->pack_mutex ->pcpu_lock ->vmap_area_lock ->purge_vmap_area_lock ->stock_lock ->&base->lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&aux->poke_mutex ->&rq->__lock ->pack_mutex.wait_lock ->&p->pi_lock ->rtnl_mutex ->rcu_node_0 ->rtnl_mutex.wait_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 940 BD: 1 +.+.: &f->f_pos_lock ->&mm->mmap_lock ->&p->lock ->tk_core.seq.seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&type->i_mutex_dir_key#3 ->sb_writers#5 ->&sb->s_type->i_mutex_key#9 ->&type->i_mutex_dir_key#2 ->sb_writers#3 ->&type->i_mutex_dir_key#4 ->sb_writers#8 ->&type->i_mutex_dir_key#5 ->&rq->__lock ->fs_reclaim ->&of->mutex ->&c->lock ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->sysctl_lock ->&zone->lock ->sb_writers#4 ->remove_cache_srcu ->&f->f_lock ->uts_sem ->sb_writers#9 ->sb_writers#10 ->sb_writers#11 ->sb_writers#12 ->&sb->s_type->i_mutex_key#18 ->&lock->wait_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rcu_state.expedited_wq ->sb_writers#15 FD: 1 BD: 4257 ....: key#2 FD: 878 BD: 4 +.+.: &p->lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&c->lock ->&____s->seqcount ->file_systems_lock ->namespace_sem ->&of->mutex ->&n->list_lock ->&rq->__lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->cpufreq_driver_lock ->module_mutex ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->stock_lock ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 153 BD: 1 +.+.: &type->s_umount_key#30/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 41 BD: 4271 +.+.: &sb->s_type->i_lock_key#24 ->&dentry->d_lock ->bit_wait_table + i FD: 157 BD: 3 ++++: &type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->namespace_sem ->&____s->seqcount ->tk_core.seq.seqcount ->&c->lock ->remove_cache_srcu ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->&sem->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 28 BD: 196 ....: &x->wait#25 ->&p->pi_lock FD: 41 BD: 11 +.+.: &net->unx.table.locks[i] ->&net->unx.table.locks[i]/1 FD: 1003 BD: 2 +.+.: &sb->s_type->i_mutex_key#10 ->&net->unx.table.locks[i] ->&u->lock ->&u->peer_wait ->rlock-AF_UNIX ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->nl_table_lock ->nl_table_wait.lock ->clock-AF_NETLINK ->genl_sk_destructing_waitq.lock ->&nlk->wait ->wlock-AF_NETLINK ->(netlink_chain).rwsem ->tomoyo_ss 
->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#8 ->&wb->list_lock ->&dentry->d_lock ->sk_lock-AF_INET ->slock-AF_INET ->clock-AF_INET ->&____s->seqcount ->sk_lock-AF_INET6 ->slock-AF_INET6 ->clock-AF_INET6 ->&cfs_rq->removed.lock ->&table->hash[i].lock ->&net->packet.sklist_lock ->&po->bind_lock ->sk_lock-AF_PACKET ->slock-AF_PACKET ->fanout_mutex ->&rnp->exp_wq[3] ->clock-AF_PACKET ->rlock-AF_PACKET ->pcpu_lock ->elock-AF_PACKET ->&rnp->exp_wq[1] ->&rnp->exp_wq[0] ->&rnp->exp_lock ->&rcu_state.expedited_wq ->&rnp->exp_wq[2] ->rcu_state.exp_mutex ->sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->hci_dev_list_lock ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->quarantine_lock ->stock_lock ->&net->xdp.lock ->&xs->map_list_lock ->&xs->mutex ->clock-AF_XDP ->(work_completion)(&msk->work) ->pool_lock ->&hashinfo->lock#2 ->&pnsocks.lock ->resource_mutex ->clock-AF_PHONET ->rlock-AF_PHONET ->sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->l2tp_ip_lock ->sk_lock-AF_INET/1 ->(console_sem).lock ->base_sockets.lock ->clock-AF_ISDN ->&net->ipv4.ra_mutex ->&hashinfo->lock ->krc.lock ->clock-AF_NETROM ->sk_lock-AF_NETROM ->slock-AF_NETROM ->(work_completion)(&smc->connect_work) ->sk_lock-AF_SMC ->slock-AF_SMC ->&smc->clcsock_release_lock ->&bsd_socket_locks[i] ->clock-AF_RXRPC ->(wq_completion)krxrpcd ->&wq->mutex ->rlock-AF_RXRPC ->&zone->lock ->(work_completion)(&strp->work) ->clock-AF_RDS ->&rs->rs_recv_lock ->rds_cong_monitor_lock ->rds_cong_lock ->&rs->rs_lock ->&rs->rs_rdma_lock ->&q->lock#2 ->rds_sock_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&base->lock ->sk_lock-AF_PHONET ->slock-AF_PHONET ->&list->lock#25 ->l2tp_ip6_lock ->&c->lock ->sk_lock-AF_LLC ->slock-AF_LLC ->&dir->lock#2 ->(&llc->pf_cycle_timer.timer) ->(&llc->ack_timer.timer) ->(&llc->rej_sent_timer.timer) ->(&llc->busy_state_timer.timer) ->rlock-AF_LLC ->wlock-AF_LLC ->&list->lock#27 ->pfkey_mutex ->clock-AF_KEY ->wlock-AF_KEY ->rlock-AF_KEY ->clock-AF_ROSE ->sk_lock-AF_ROSE ->slock-AF_ROSE ->wlock-AF_ROSE ->&list->lock#28 ->nfnl_grp_active_lock ->&meta->lock ->kfence_freelist_lock ->sk_lock-AF_TIPC ->slock-AF_TIPC ->l2cap_sk_list.lock ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&chan->lock/1 ->&conn->chan_lock ->chan_list_lock ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->sk_lock-AF_X25 ->slock-AF_X25 ->sk_lock-AF_CAN ->slock-AF_CAN ->raw_lock ->clock-AF_IEEE802154 ->rlock-AF_IEEE802154 ->sk_lock-AF_PPPOX ->slock-AF_PPPOX ->rlock-AF_CAIF ->sk_lock-AF_CAIF ->slock-AF_CAIF ->elock-AF_CAIF ->rlock-AF_PPPOX ->wlock-AF_PPPOX ->clock-AF_NFC ->rlock-AF_NFC ->sk_lock-AF_AX25 ->slock-AF_AX25 ->hidp_sk_list.lock ->clock-AF_BLUETOOTH ->&n->list_lock ->&data->lock ->cpu_hotplug_lock ->raw_notifier_lock ->rtnl_mutex ->rlock-AF_CAN ->elock-AF_CAN ->&list->lock#37 ->sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->sco_sk_list.lock ->&x->wait#10 ->dgram_lock ->sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->bcm_notifier_lock ->rtnl_mutex.wait_lock ->&rng->jent_lock ->bnep_sk_list.lock ->vmap_area_lock ->purge_vmap_area_lock ->&match->lock ->data_sockets.lock ->sk_lock-AF_ISDN ->slock-AF_ISDN ->sk_lock-AF_KCM ->slock-AF_KCM ->&mux->lock ->(work_completion)(&kcm->tx_work) ->&mux->rx_lock ->&knet->mutex ->console_owner_lock ->console_owner ->pgd_lock ->key ->percpu_counters_lock ->isotp_notifier_lock ->&x->wait ->(work_completion)(&(&sw_ctx_tx->tx_work.work)->work) FD: 63 BD: 7 +.+.: &u->lock ->clock-AF_UNIX ->&u->lock/1 ->&sk->sk_peer_lock 
->rlock-AF_UNIX ->&u->peer_wait ->&ei->socket.wq.wait ->&f->f_owner.lock FD: 1 BD: 8 +...: clock-AF_UNIX FD: 32 BD: 8 +.+.: &u->peer_wait ->&p->pi_lock ->&ei->socket.wq.wait FD: 1 BD: 8 +.+.: rlock-AF_UNIX FD: 260 BD: 2 .+.+: sb_writers#4 ->mount_lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&wb->work_lock ->&type->i_mutex_dir_key#3 ->&type->i_mutex_dir_key#3/1 ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->&dd->lock ->bit_wait_table + i ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&sb->s_type->i_mutex_key#8 ->tomoyo_ss ->&sem->wait_lock ->&p->pi_lock ->&s->s_inode_list_lock ->sb_internal ->inode_hash_lock ->&fsnotify_mark_srcu ->stock_lock ->lock#5 ->&lruvec->lru_lock ->integrity_iint_lock ->&dentry->d_lock ->&iint->mutex ->&ei->xattr_sem ->&folio_wait_table[i] ->rcu_node_0 ->quarantine_lock ->&sbi->s_writepages_rwsem ->&____s->seqcount#2 ->&journal->j_wait_transaction_locked ->&cfs_rq->removed.lock ->remove_cache_srcu ->&journal->j_list_lock ->&rcu_state.expedited_wq ->&n->list_lock ->fs_reclaim ->mapping.invalidate_lock ->&base->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&journal->j_barrier ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&lock->wait_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&journal->j_wait_reserved ->&sb->s_type->i_mutex_key#8/4 ->&pgdat->reclaim_wait[i] ->(&timer.timer) FD: 1 BD: 4251 +.+.: &pid->lock FD: 1 BD: 27 +.+.: &new_ns->ns_lock FD: 214 BD: 1 ++++: &type->s_umount_key#31 ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#22 ->&obj_hash[i].lock ->pool_lock#2 ->&journal->j_state_lock ->&p->alloc_lock ->(work_completion)(&sbi->s_error_work) ->key#3 ->key#4 ->&sbi->s_error_lock ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&base->lock ->&fq->mq_flush_lock ->&dd->lock ->bit_wait_table + i ->&rq->__lock ->ext4_li_mtx ->(console_sem).lock ->mount_lock ->&xa->xa_lock#7 ->&eli->li_list_mtx ->&wb->list_lock ->&sbi->s_writepages_rwsem ->rcu_node_0 ->&bdi->wb_waitq ->&rcu_state.expedited_wq ->&s->s_inode_list_lock ->&ei->i_es_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->&cfs_rq->removed.lock ->&ei->i_prealloc_lock ->integrity_iint_lock ->&journal->j_list_lock FD: 1 BD: 2 +.+.: (work_completion)(&sbi->s_error_work) FD: 1 BD: 159 ....: key#3 FD: 1 BD: 155 ....: key#4 FD: 1 BD: 2 +.+.: &sbi->s_error_lock FD: 33 BD: 157 ..-.: &fq->mq_flush_lock ->tk_core.seq.seqcount ->&q->requeue_lock ->&obj_hash[i].lock ->bit_wait_table + i ->&x->wait#26 FD: 1 BD: 163 ..-.: &q->requeue_lock FD: 4 BD: 5 +.+.: &eli->li_list_mtx ->&obj_hash[i].lock ->pool_lock#2 FD: 177 BD: 154 ++++: jbd2_handle ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&ei->i_raw_lock ->&journal->j_wait_updates ->&mapping->private_lock ->&meta_group_info[i]->alloc_sem ->tk_core.seq.seqcount ->inode_hash_lock ->batched_entropy_u32.lock ->&ei->i_es_lock ->&sb->s_type->i_lock_key#22 ->&obj_hash[i].lock ->&journal->j_state_lock ->&rq->__lock ->bit_wait_table + i ->rcu_node_0 ->&sbi->s_orphan_lock ->&ei->i_data_sem ->&journal->j_list_lock ->&xa->xa_lock#7 ->lock#4 ->lock#5 ->&base->lock ->&dd->lock ->&rq_wait->wait ->&zone->lock ->stock_lock ->&ei->i_prealloc_lock ->&(ei->i_block_reservation_lock) 
->&____s->seqcount#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->key#4 ->&rcu_state.expedited_wq ->remove_cache_srcu ->&bgl->locks[i].lock ->&lock->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&n->list_lock ->&sem->wait_lock ->&journal->j_wait_reserved ->&folio_wait_table[i] ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&ei->i_data_sem/1 ->(console_sem).lock ->&pool->lock#3 ->quarantine_lock ->&s->s_inode_list_lock ->key#28 FD: 72 BD: 159 +.+.: &ret->b_state_lock ->&journal->j_list_lock ->bit_wait_table + i ->&obj_hash[i].lock FD: 71 BD: 4260 +.+.: &journal->j_list_lock ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->&obj_hash[i].lock ->&c->lock ->pool_lock#2 ->key#15 ->&meta->lock ->kfence_freelist_lock ->bit_wait_table + i FD: 1 BD: 157 +.+.: &journal->j_revoke_lock FD: 1 BD: 158 +.+.: &ei->i_raw_lock FD: 28 BD: 158 ....: &journal->j_wait_updates ->&p->pi_lock FD: 32 BD: 4288 ..-.: &wb->work_lock ->&obj_hash[i].lock ->&base->lock FD: 53 BD: 155 ++++: &meta_group_info[i]->alloc_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&x->wait#26 ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->&base->lock ->(&timer.timer) ->&fq->mq_flush_lock ->&bgl->locks[i].lock ->&cfs_rq->removed.lock FD: 182 BD: 150 .+.+: sb_internal ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->&meta->lock ->rcu_node_0 ->&n->list_lock ->remove_cache_srcu ->&journal->j_wait_transaction_locked ->quarantine_lock ->&cfs_rq->removed.lock ->&base->lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 2 BD: 4258 ++++: &ei->i_prealloc_lock ->&pa->pa_lock#2 FD: 32 BD: 1 .+.+: file_rwsem ->&ctx->flc_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 2 BD: 2 +.+.: &ctx->flc_lock ->&fll->lock FD: 1 BD: 3 +.+.: &fll->lock FD: 230 BD: 3 +.+.: &type->i_mutex_dir_key#3/1 ->rename_lock.seqcount ->&dentry->d_lock ->fs_reclaim ->&c->lock ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&sb->s_type->i_mutex_key#8 ->&sem->wait_lock ->&xa->xa_lock#13 ->stock_lock ->&n->list_lock ->&fsnotify_mark_srcu ->&type->i_mutex_dir_key#3 ->&wb->list_lock ->sb_internal ->&____s->seqcount#2 ->rcu_node_0 ->&cfs_rq->removed.lock ->&journal->j_wait_transaction_locked ->remove_cache_srcu ->&rcu_state.gp_wq ->&u->bindlock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->quarantine_lock ->&base->lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit FD: 29 BD: 12 +.-.: (&cb->timer) ->&obj_hash[i].lock ->&base->lock ->tk_core.seq.seqcount ->&rq_wait->wait FD: 148 BD: 1 +.+.: &type->s_umount_key#32/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount 
->&sb->s_type->i_mutex_key#11 ->&dentry->d_lock FD: 40 BD: 3 +.+.: &sb->s_type->i_lock_key#25 ->&dentry->d_lock FD: 138 BD: 2 +.+.: &sb->s_type->i_mutex_key#11 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount FD: 139 BD: 1 +.+.: &type->s_umount_key#33 ->sb_lock ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&lru->node[i].lock ->&obj_hash[i].lock FD: 42 BD: 1 +.+.: &type->s_umount_key#34 ->sb_lock ->&dentry->d_lock FD: 147 BD: 1 +.+.: &type->s_umount_key#35/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 40 BD: 5 +.+.: &sb->s_type->i_lock_key#26 ->&dentry->d_lock FD: 1 BD: 1 +.+.: redirect_lock FD: 345 BD: 1 +.+.: &tty->atomic_write_lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&tty->termios_rwsem ->&tty->files_lock FD: 36 BD: 7 +.+.: &ldata->output_lock ->&port_lock_key ->&rq->__lock FD: 147 BD: 1 +.+.: &type->s_umount_key#36/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#27 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->fuse_mutex ->&dentry->d_lock FD: 40 BD: 4268 +.+.: &sb->s_type->i_lock_key#27 ->&dentry->d_lock FD: 1 BD: 2 +.+.: fuse_mutex FD: 148 BD: 1 +.+.: &type->s_umount_key#37/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->&____s->seqcount ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#28 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pstore_sb_lock ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#28 ->&dentry->d_lock FD: 1 BD: 2 +.+.: pstore_sb_lock FD: 151 BD: 1 +.+.: &type->s_umount_key#38/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#29 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->bpf_preload_lock ->&dentry->d_lock FD: 40 BD: 2 +.+.: &sb->s_type->i_lock_key#29 ->&dentry->d_lock FD: 140 BD: 2 +.+.: bpf_preload_lock ->(kmod_concurrent_max).lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#17 ->&rq->__lock ->running_helpers_waitq.lock FD: 28 BD: 2 ++++: uts_sem ->hostname_poll.wait.lock ->&rq->__lock FD: 139 BD: 1 +.+.: &type->s_umount_key#39 ->sb_lock ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&lru->node[i].lock ->&obj_hash[i].lock FD: 132 BD: 3 ++++: &type->i_mutex_dir_key#5 ->fs_reclaim ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->&sbinfo->stat_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&obj_hash[i].lock ->&sem->wait_lock ->&rq->__lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->rcu_node_0 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.gp_wq ->&rcu_state.expedited_wq FD: 143 BD: 2 .+.+: sb_writers#5 ->mount_lock ->&type->i_mutex_dir_key#5 ->&type->i_mutex_dir_key#5/1 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key ->&wb->list_lock ->&sb->s_type->i_mutex_key#12 ->&s->s_inode_list_lock ->&info->lock ->&sbinfo->stat_lock 
->&xa->xa_lock#7 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->tomoyo_ss ->&xattrs->lock ->fs_reclaim ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&dentry->d_lock ->&c->lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 132 BD: 4 +.+.: &sb->s_type->i_mutex_key#12 ->&xattrs->lock ->tk_core.seq.seqcount ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->&info->lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->key#9 ->&rq->__lock ->&dentry->d_lock ->rename_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&sb->s_type->i_mutex_key#12/4 ->rcu_node_0 ->tomoyo_ss ->&mapping->i_mmap_rwsem ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 138 BD: 3 +.+.: &type->i_mutex_dir_key#5/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&u->bindlock ->pool_lock#2 ->&sb->s_type->i_mutex_key#12 ->&fsnotify_mark_srcu ->&sem->wait_lock ->&rq->__lock ->&n->list_lock ->rcu_node_0 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&info->lock ->&xa->xa_lock#7 ->remove_cache_srcu ->&dentry->d_lock/1 ->&sb->s_type->i_mutex_key#12/4 ->&____s->seqcount#2 ->&rcu_state.gp_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 46 BD: 81 +.+.: &f->f_lock ->fasync_lock FD: 1 BD: 2 ....: key#5 FD: 1 BD: 3 ....: hostname_poll.wait.lock FD: 235 BD: 1 .+.+: dup_mmap_sem ->&mm->mmap_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 140 BD: 137 +.+.: &mm->mmap_lock/1 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&vma->vm_lock->lock ->fs_reclaim ->&mapping->i_mmap_rwsem ->&anon_vma->rwsem ->mmu_notifier_invalidate_range_start ->&mm->page_table_lock ->ptlock_ptr(page) ->ptlock_ptr(page)#2 ->&mm->context.lock ->&obj_hash[i].lock ->&zone->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->quarantine_lock ->&n->list_lock ->remove_cache_srcu ->&sem->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock ->&meta->lock ->key#26 ->&pgdat->kswapd_wait FD: 27 BD: 138 +.+.: &mm->context.lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 1 BD: 11 .+.+: &xattrs->lock FD: 141 BD: 8 +.+.: &u->bindlock ->&net->unx.table.locks[i] ->&net->unx.table.locks[i]/1 ->&bsd_socket_locks[i] ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock FD: 40 BD: 12 +.+.: &net->unx.table.locks[i]/1 ->&dentry->d_lock FD: 1 BD: 11 +.+.: &bsd_socket_locks[i] FD: 257 BD: 2 +.+.: &u->iolock ->rlock-AF_UNIX ->&mm->mmap_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&rq->__lock ->&u->peer_wait ->&u->lock ->&____s->seqcount ->&dir->lock ->rcu_node_0 ->stock_lock ->&base->lock ->&sem->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 31 BD: 3900 ..-.: &ei->socket.wq.wait ->&p->pi_lock ->&ep->lock FD: 1 BD: 4326 ....: &wq#3 FD: 42 BD: 8 +.+.: &u->lock/1 ->&sk->sk_peer_lock ->&dentry->d_lock ->&sk->sk_peer_lock/1 FD: 150 BD: 1 +.+.: &group->mark_mutex ->&fsnotify_mark_srcu ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock ->ucounts_lock ->&mark->lock 
->&conn->lock ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&____s->seqcount#2 ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 12 BD: 238 +.+.: &group->inotify_data.idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 3 BD: 2 +.+.: &mark->lock ->&fsnotify_mark_srcu ->&conn->lock FD: 1 BD: 7 +.+.: &conn->lock FD: 1 BD: 1 +.+.: &evdev->client_lock FD: 236 BD: 1 +.+.: &evdev->mutex ->&dev->mutex#2 ->&mm->mmap_lock ->&lock->wait_lock ->&p->pi_lock FD: 251 BD: 5 +.+.: sk_lock-AF_NETLINK ->slock-AF_NETLINK ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&c->lock ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->rcu_node_0 ->&rq->__lock ->clock-AF_NETLINK FD: 29 BD: 6 +...: slock-AF_NETLINK ->&sk->sk_lock.wq FD: 1 BD: 3882 ..-.: rlock-AF_NETLINK FD: 1 BD: 7 ....: &nlk->wait FD: 142 BD: 77 +.+.: (work_completion)(&ht->run_work) ->&ht->mutex ->&rq->__lock FD: 141 BD: 78 +.+.: &ht->mutex ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->rhashtable_bucket ->&ht->lock ->&c->lock ->&n->list_lock ->remove_cache_srcu ->&____s->seqcount ->&____s->seqcount#2 ->&rq->__lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->quarantine_lock ->&meta->lock FD: 1 BD: 3689 ....: rhashtable_bucket/1 FD: 12 BD: 79 +.+.: &ht->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 8 +...: clock-AF_NETLINK FD: 1 BD: 7 ....: genl_sk_destructing_waitq.lock FD: 1 BD: 7 ....: wlock-AF_NETLINK FD: 182 BD: 2 +.+.: (work_completion)(&w->w) ->nfc_devlist_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->&base->lock FD: 1 BD: 4 +...: &rdev->beacon_registrations_lock FD: 27 BD: 4 +.+.: &genl_data->genl_data_mutex ->&rq->__lock FD: 1 BD: 75 +.-.: &rdev->mgmt_registrations_lock FD: 1 BD: 4 +...: &wdev->pmsr_lock FD: 1 BD: 70 +.+.: reg_indoor_lock FD: 1008 BD: 1 .+.+: sb_writers#6 ->mount_lock ->&sb->s_type->i_mutex_key#10 ->&rq->__lock FD: 2 BD: 9 +.+.: &sk->sk_peer_lock ->&sk->sk_peer_lock/1 FD: 31 BD: 7 ....: &group->notification_waitq ->&p->pi_lock ->&ep->lock FD: 1 BD: 7 +.+.: &group->notification_lock FD: 1 BD: 1 ....: &client->wait FD: 1 BD: 4257 ....: key#6 FD: 1 BD: 4257 ....: key#7 FD: 1 BD: 4257 ....: key#8 FD: 849 BD: 1 +.+.: &pipe->mutex/1 ->&pipe->rd_wait ->&rq->__lock ->&lock->wait_lock ->&pipe->wr_wait ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&mm->mmap_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->&zone->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->sk_lock-AF_NETLINK ->slock-AF_NETLINK ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->&c->lock ->nfnl_subsys_ctnetlink ->purge_vmap_area_lock ->&sighand->siglock ->&sem->wait_lock ->&p->pi_lock ->&rcu_state.expedited_wq ->&n->list_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->sk_lock-AF_INET6 ->slock-AF_INET6 ->rtnl_mutex ->rlock-AF_NETLINK ->&u->iolock ->&ei->socket.wq.wait ->&f->f_lock ->&u->lock ->&pipe->mutex#2/2 ->&____s->seqcount#2 ->&data->lock ->quarantine_lock ->rtnl_mutex.wait_lock FD: 31 BD: 4 ....: &pipe->rd_wait ->&p->pi_lock ->&ep->lock FD: 28 BD: 158 ..-.: &x->wait#26 ->&p->pi_lock FD: 1 BD: 4254 ....: &sem->wait_lock FD: 31 BD: 4 
....: &pipe->wr_wait ->&p->pi_lock ->&ep->lock FD: 46 BD: 1 .+.+: sb_writers#7 ->tk_core.seq.seqcount ->mount_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 4235 ....: key#9 FD: 4 BD: 4326 +.+.: &dentry->d_lock/2 ->&dentry->d_lock/3 FD: 3 BD: 4327 +.+.: &dentry->d_lock/3 ->&____s->seqcount#4 FD: 1 BD: 4329 +.+.: &____s->seqcount#4/1 FD: 263 BD: 1 +.+.: sk_lock-AF_UNIX ->slock-AF_UNIX ->&mm->mmap_lock ->fs_reclaim ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->stock_lock ->&c->lock ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&rq->__lock ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->&n->list_lock ->&f->f_lock FD: 1 BD: 2 +...: slock-AF_UNIX FD: 1 BD: 1 ....: &rs->lock#2 FD: 54 BD: 3 +.+.: oom_adj_mutex ->&p->alloc_lock ->rcu_node_0 ->&rq->__lock ->oom_adj_mutex.wait_lock ->&rcu_state.expedited_wq FD: 216 BD: 2 +.+.: &ep->mtx ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&f->f_lock ->&ei->socket.wq.wait ->&ep->lock ->&group->notification_waitq ->&group->notification_lock ->&sighand->signalfd_wqh ->&sighand->siglock ->&pipe->rd_wait ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock ->key#12 ->&lock->wait_lock ->remove_cache_srcu ->&pipe->wr_wait ->stock_lock ->wakeup_ida.xa_lock ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->uevent_sock_mutex ->subsys mutex#15 ->events_lock ->&dentry->d_lock ->&n->list_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&u->lock ->&ACCESS_PRIVATE(sdp, lock) ->wakeup_srcu ->&x->wait#3 ->(&ws->timer) ->&base->lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&ws->lock ->deleted_ws.lock ->kernfs_idr_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&ep->mtx/1 ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->quarantine_lock ->&ep->poll_wait FD: 217 BD: 1 +.+.: epnested_mutex ->&ep->mtx ->&ep->mtx/1 FD: 30 BD: 3917 ...-: &ep->lock ->&ep->wq ->&ws->lock FD: 31 BD: 154 ....: &sighand->signalfd_wqh ->&p->pi_lock ->&ep->lock FD: 871 BD: 2 .+.+: sb_writers#8 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&wb->list_lock ->&type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->remove_cache_srcu ->&rq->__lock ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->&sb->s_type->i_mutex_key#13 ->iattr_mutex ->&xattrs->lock ->&____s->seqcount#2 ->&____s->seqcount ->rcu_node_0 ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&rcu_state.expedited_wq FD: 3 BD: 10 +.+.: swap_lock ->&p->lock#2 FD: 150 BD: 1 .+.+: kn->active ->fs_reclaim ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&k->list_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->remove_cache_srcu FD: 137 BD: 69 +.+.: &kernfs_locks->open_file_mutex[count] ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount ->&rq->__lock ->&n->list_lock ->rcu_node_0 ->remove_cache_srcu ->&lock->wait_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 139 BD: 3 +.+.: &sb->s_type->i_mutex_key#13 ->&rq->__lock ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->&sem->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 869 BD: 6 
+.+.: &of->mutex ->&rq->__lock ->&p->pi_lock ->cgroup_mutex ->cpu_hotplug_lock ->cgroup_mutex.wait_lock ->cpuset_hotplug_work ->&lock->wait_lock FD: 28 BD: 3918 ..-.: &ep->wq ->&p->pi_lock FD: 149 BD: 1 .+.+: kn->active#2 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&____s->seqcount ->remove_cache_srcu ->quarantine_lock ->rcu_node_0 ->&rq->__lock FD: 35 BD: 2 +.+.: (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 149 BD: 1 .+.+: kn->active#3 ->&rq->__lock ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu ->quarantine_lock FD: 143 BD: 1 .+.+: kn->active#4 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->param_lock ->&c->lock ->pool_lock#2 ->&on->poll ->&n->list_lock FD: 137 BD: 247 +.+.: iattr_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->tk_core.seq.seqcount ->&rq->__lock FD: 1 BD: 71 +.+.: disk_events_mutex FD: 178 BD: 1 .+.+: kn->active#5 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->&rq->__lock ->quarantine_lock ->&device->physical_node_lock ->udc_lock ->remove_cache_srcu ->fw_lock ->rcu_node_0 ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->&rfkill->lock ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->&base->lock ->&meta->lock FD: 139 BD: 1 .+.+: kn->active#6 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 139 BD: 1 .+.+: kn->active#7 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 139 BD: 1 .+.+: kn->active#8 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#9 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&n->list_lock FD: 139 BD: 1 .+.+: kn->active#10 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount FD: 139 BD: 1 .+.+: kn->active#11 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount FD: 139 BD: 1 .+.+: kn->active#12 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 139 BD: 1 .+.+: kn->active#13 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 139 BD: 1 .+.+: kn->active#14 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount ->&rq->__lock FD: 1 BD: 223 +.+.: rcu_state.exp_mutex.wait_lock FD: 141 BD: 1 .+.+: kn->active#15 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 141 BD: 1 .+.+: kn->active#16 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&n->list_lock FD: 138 BD: 1 .+.+: kn->active#17 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->remove_cache_srcu ->&c->lock FD: 139 BD: 1 .+.+: kn->active#18 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#19 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#20 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 141 BD: 1 .+.+: kn->active#21 ->fs_reclaim ->&c->lock 
->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&____s->seqcount ->&n->list_lock FD: 141 BD: 1 .+.+: kn->active#22 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&n->list_lock FD: 141 BD: 1 .+.+: kn->active#23 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock FD: 141 BD: 1 .+.+: kn->active#24 ->fs_reclaim ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&dev->power.lock ->pci_lock FD: 2 BD: 8 ....: pci_lock ->pci_config_lock FD: 139 BD: 1 .+.+: kn->active#25 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#26 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 138 BD: 1 .+.+: kn->active#27 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->remove_cache_srcu FD: 139 BD: 1 .+.+: kn->active#28 ->&rq->__lock ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 139 BD: 1 .+.+: kn->active#29 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount FD: 138 BD: 1 .+.+: kn->active#30 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] ->remove_cache_srcu ->&____s->seqcount FD: 139 BD: 1 .+.+: kn->active#31 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&rq->__lock ->&n->list_lock FD: 139 BD: 1 .+.+: kn->active#32 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 139 BD: 1 .+.+: kn->active#33 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 28 BD: 1 +.-.: (&journal->j_commit_timer) ->&p->pi_lock FD: 120 BD: 151 +.+.: &journal->j_checkpoint_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&base->lock ->bit_wait_table + i ->&rq->__lock ->&journal->j_state_lock ->&fq->mq_flush_lock ->&x->wait#26 ->&journal->j_list_lock ->rcu_node_0 ->&c->lock ->(&timer.timer) ->&ei->i_es_lock ->&mapping->private_lock ->&meta->lock ->kfence_freelist_lock ->&sb->s_type->i_lock_key#3 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&rq_wait->wait FD: 28 BD: 160 ....: &journal->j_wait_transaction_locked ->&p->pi_lock FD: 32 BD: 3 ..-.: &(&wb->dwork)->timer FD: 1 BD: 4270 ..-.: &memcg->move_lock FD: 1 BD: 4288 ..-.: key#10 FD: 1 BD: 159 +.+.: &sbi->s_md_lock FD: 1 BD: 1 ....: &journal->j_fc_wait FD: 1 BD: 1 +.+.: &journal->j_history_lock FD: 203 BD: 3 +.+.: (wq_completion)writeback ->(work_completion)(&(&wb->dwork)->work) ->(work_completion)(&(&wb->bw_dwork)->work) ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->(work_completion)(&barr->work) FD: 200 BD: 4 +.+.: (work_completion)(&(&wb->dwork)->work) ->&wb->work_lock ->&wb->list_lock ->&p->sequence ->key#10 ->&sb->s_type->i_lock_key#22 ->&sbi->s_writepages_rwsem ->pool_lock#2 ->&obj_hash[i].lock ->&dd->lock ->&rq->__lock ->&pl->lock ->&bdi->wb_waitq FD: 2 BD: 6 +.-.: &p->sequence ->key#13 FD: 197 BD: 149 ++++: &sbi->s_writepages_rwsem ->&xa->xa_lock#7 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock#4 ->lock#5 ->&obj_hash[i].lock ->&journal->j_state_lock ->jbd2_handle ->tk_core.seq.seqcount ->&dd->lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->&rq_wait->wait ->rcu_node_0 ->&rq->__lock ->&mapping->private_lock ->&____s->seqcount#2 ->&journal->j_wait_transaction_locked 
->&rsp->gp_wait ->&rnp->exp_wq[0] ->&journal->j_barrier ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&ei->i_data_sem ->&rnp->exp_wq[1] ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&folio_wait_table[i] ->&n->list_lock ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&sem->waiters ->(console_sem).lock ->console_owner_lock ->console_owner ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#22 ->&s->s_inode_list_lock ->sb_internal ->inode_hash_lock ->&fsnotify_mark_srcu ->&lruvec->lru_lock ->quarantine_lock FD: 86 BD: 1 .+.+: &type->s_umount_key#40 ->&sb->s_type->i_lock_key#3 ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&dd->lock ->lock#4 ->lock#5 ->&rq->__lock ->&wb->list_lock ->&base->lock ->&rq_wait->wait ->rcu_node_0 ->&n->list_lock ->bit_wait_table + i ->lock#10 FD: 1 BD: 4288 ..-.: &s->s_inode_wblist_lock FD: 1 BD: 4289 ..-.: key#11 FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#14 FD: 56 BD: 1 .+.+: mapping.invalidate_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->tk_core.seq.seqcount ->&dd->lock ->&rq->__lock ->&c->lock FD: 1 BD: 1 +.+.: &mousedev->client_lock FD: 38 BD: 8 +.+.: &mousedev->mutex#2 ->&dev->mutex#2 FD: 32 BD: 3 ..-.: &(&wb->bw_dwork)->timer FD: 68 BD: 4 +.+.: (work_completion)(&(&wb->bw_dwork)->work) ->&wb->list_lock ->&rq->__lock FD: 1 BD: 212 +.+.: uevent_sock_mutex.wait_lock FD: 49 BD: 5 +.+.: &sb->s_type->i_mutex_key#12/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 139 BD: 1 .+.+: kn->active#34 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#35 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#36 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 138 BD: 1 .+.+: kn->active#37 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->remove_cache_srcu ->&n->list_lock FD: 140 BD: 1 .+.+: kn->active#38 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->i2c_dev_list_lock FD: 142 BD: 2 +.+.: &dev_instance->mutex ->fs_reclaim ->pool_lock#2 ->vicodec_core:1844:(hdl)->_lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 4 BD: 3 +.+.: vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 5 ....: &vdev->fh_lock FD: 147 BD: 1 +.+.: &mdev->req_queue_mutex ->&dev_instance->mutex ->&vdev->fh_lock ->&mdev->graph_mutex ->vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&dev->dev_mutex ->&dev->mutex#3 ->&rq->__lock ->&cfs_rq->removed.lock FD: 1 BD: 4 ....: &m2m_dev->job_spinlock FD: 1 BD: 4 ....: &q->done_wq FD: 1 BD: 4 +.+.: &q->mmap_lock FD: 1 BD: 1 +.+.: fh->state->lock FD: 142 BD: 2 +.+.: &dev->dev_mutex ->fs_reclaim ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&____s->seqcount ->&obj_hash[i].lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock FD: 4 BD: 3 +.+.: vim2m:1183:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 139 BD: 1 .+.+: kn->active#39 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 1 
BD: 1 +.+.: &vcapture->lock FD: 1 BD: 3 ....: key#12 FD: 2 BD: 2 +.+.: &dev->mutex#3 ->&vdev->fh_lock FD: 1 BD: 10 +.+.: &sk->sk_peer_lock/1 FD: 30 BD: 1 ..-.: &(&tbl->gc_work)->timer FD: 51 BD: 2 +.+.: (work_completion)(&(&tbl->gc_work)->work) ->&tbl->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 16 BD: 1 +.-.: (&dom->period_timer) ->key#13 ->&p->sequence ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4290 ..-.: key#13 FD: 30 BD: 1 ..-.: drivers/base/dd.c:321 FD: 38 BD: 2 +.+.: (deferred_probe_timeout_work).work ->device_links_lock ->deferred_probe_mutex ->deferred_probe_work ->&x->wait#10 ->&pool->lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock FD: 42 BD: 4 +.+.: &sb->s_type->i_mutex_key#4/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 2 BD: 4288 ..-.: &pl->lock ->key#11 FD: 139 BD: 1 .+.+: kn->active#40 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#41 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#42 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 1 .+.+: kn->active#43 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&rq->__lock FD: 28 BD: 7 +.+.: &lo->lo_mutex ->&rq->__lock ->rcu_node_0 FD: 48 BD: 10 +.+.: &nbd->config_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&bdev->bd_size_lock ->&q->queue_lock ->&ACCESS_PRIVATE(sdp, lock) ->set->srcu ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#3 ->&c->lock ->pool_lock ->&n->list_lock FD: 31 BD: 10 ....: &ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&ACCESS_PRIVATE(sdp, lock) FD: 139 BD: 1 .+.+: kn->active#44 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 2 BD: 6 +.+.: &new->lock ->&mtdblk->cache_mutex FD: 1 BD: 7 +.+.: &mtdblk->cache_mutex FD: 139 BD: 1 .+.+: kn->active#45 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] FD: 235 BD: 1 +.+.: &mtd->master.chrdev_lock ->&mm->mmap_lock FD: 1 BD: 4 +.+.: destroy_lock FD: 32 BD: 1 ..-.: fs/notify/mark.c:89 FD: 146 BD: 2 +.+.: connector_reaper_work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->pool_lock#2 ->&____s->seqcount ->&base->lock ->&cfs_rq->removed.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->pool_lock ->rcu_node_0 ->quarantine_lock FD: 147 BD: 2 +.+.: (reaper_work).work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&x->wait#3 ->&rq->__lock ->pool_lock#2 ->pool_lock ->&base->lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 1 +.+.: userns_state_mutex FD: 4 BD: 70 +...: fib_info_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 105 BD: 73 +...: &net->sctp.local_addr_lock ->&net->sctp.addr_wq_lock FD: 104 BD: 75 +.-.: &net->sctp.addr_wq_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->&____s->seqcount#2 ->slock-AF_INET6/1 ->slock-AF_INET/1 ->k-slock-AF_INET6/1 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock FD: 1 BD: 69 +...: _xmit_LOOPBACK FD: 27 BD: 77 .+.+: netpoll_srcu ->&rq->__lock FD: 18 BD: 93 +.-.: &in_dev->mc_tomb_lock ->pool_lock#2 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount FD: 20 BD: 93 +.-.: &im->lock ->&c->lock ->&n->list_lock ->pool_lock#2 
->&____s->seqcount ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&zone->lock ->&data->lock FD: 1 BD: 77 +.+.: cbs_list_lock FD: 30 BD: 72 +...: &net->ipv6.addrconf_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 31 BD: 3698 +...: &ifa->lock ->batched_entropy_u32.lock ->crngs.lock ->&obj_hash[i].lock ->&base->lock FD: 52 BD: 3699 +...: &tb->tb6_lock ->&net->ipv6.fib6_walker_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->rt6_exception_lock ->&data->fib_event_queue_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&zone->lock ->&n->list_lock ->&____s->seqcount#2 ->quarantine_lock ->stock_lock ->(console_sem).lock FD: 1 BD: 3701 ++..: &net->ipv6.fib6_walker_lock FD: 443 BD: 76 +.+.: sk_lock-AF_INET ->slock-AF_INET ->&table->hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&queue->rskq_lock ->clock-AF_INET ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&mm->mmap_lock ->tk_core.seq.seqcount ->&sd->defer_lock ->&meta->lock ->kfence_freelist_lock ->mmu_notifier_invalidate_range_start ->&hashinfo->ehash_locks[i] ->elock-AF_INET ->&n->list_lock ->&dir->lock#2 ->batched_entropy_u8.lock ->rcu_node_0 ->remove_cache_srcu ->&____s->seqcount#8 ->once_mutex ->&pool->lock ->batched_entropy_u32.lock ->batched_entropy_u16.lock ->&ei->socket.wq.wait ->quarantine_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->free_vmap_area_lock ->vmap_area_lock ->stock_lock ->pcpu_alloc_mutex ->pack_mutex ->text_mutex ->&fp->aux->used_maps_mutex ->&list->lock#5 ->&f->f_owner.lock ->(&tw->tw_timer) ->&dccp_hashinfo.bhash[i].lock ->&data->lock ->&sighand->siglock ->&msk->pm.lock ->&im->lock ->&in_dev->mc_tomb_lock ->&sctp_port_hashtable[i].lock ->crngs.lock ->&asoc->wait ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET/1 ->k-slock-AF_INET ->k-clock-AF_INET ->&token_hash[i].lock ->k-sk_lock-AF_INET ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->cpu_hotplug_lock ->sctp_assocs_id_lock ->(console_sem).lock ->&sem->wait_lock ->&p->pi_lock ->hrtimer_bases.lock ->krc.lock FD: 94 BD: 90 +.-.: slock-AF_INET ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->tk_core.seq.seqcount ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->elock-AF_INET ->&____s->seqcount#2 ->&dccp_hashinfo.bhash[i].lock ->&sk->sk_lock.wq ->&data->lock ->krc.lock ->key#24 ->batched_entropy_u32.lock ->quarantine_lock ->hrtimer_bases.lock FD: 1 BD: 126 ++-.: clock-AF_INET FD: 503 BD: 74 +.+.: sk_lock-AF_INET6 ->slock-AF_INET6 ->&table->hash[i].lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->fs_reclaim ->&mm->mmap_lock ->once_lock ->&pool->lock ->rcu_node_0 ->&rq->__lock ->tk_core.seq.seqcount ->&zone->lock ->clock-AF_INET6 ->&msk->pm.lock ->elock-AF_INET6 ->(kmod_concurrent_max).lock ->&____s->seqcount#2 ->&x->wait#17 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->running_helpers_waitq.lock ->&sighand->siglock ->&sctp_port_hashtable[i].lock ->crngs.lock ->&base->lock ->&asoc->wait ->&n->list_lock 
->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&list->lock#5 ->stock_lock ->&rcu_state.expedited_wq ->krc.lock ->sctp_assocs_id_lock ->&list->lock#21 ->quarantine_lock ->tcpv6_prot_mutex ->device_spinlock ->crypto_alg_sem ->(crypto_chain).rwsem ->&x->wait#21 ->(&timer.timer) ->&hashinfo->ehash_locks[i] ->&ei->socket.wq.wait ->remove_cache_srcu ->&f->f_owner.lock ->&sem->wait_lock ->&p->pi_lock ->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->&token_hash[i].lock ->k-sk_lock-AF_INET6 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->ip6_sk_fl_lock ->&queue->rskq_lock ->(console_sem).lock ->&sctp_ep_hashtable[i].lock ->sk_lock-AF_INET6/1 ->&data->lock ->&sw_ctx_rx->wq ->cpu_hotplug_lock ->&ndev->lock ->&f->f_lock ->&dccp_hashinfo.bhash[i].lock ->tcp_md5sig_mutex ->key#27 ->acaddr_hash_lock ->&tb->tb6_lock ->&idev->mc_lock ->wlock-AF_INET6 ->&mux->lock ->prog_idr_lock ->bpf_lock ->&net->xfrm.xfrm_policy_lock ->&sw_ctx_tx->encrypt_compl_lock ->&sd->defer_lock FD: 86 BD: 92 +.-.: slock-AF_INET6 ->&obj_hash[i].lock ->elock-AF_INET6 ->&sk->sk_lock.wq ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock ->&base->lock ->pool_lock#2 ->&tcp_hashinfo.bhash[i].lock ->key#24 ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u16.lock ->&hashinfo->ehash_locks[i] ->&queue->rskq_lock ->&list->lock#21 ->&data->lock ->&dccp_hashinfo.bhash[i].lock ->krc.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->batched_entropy_u32.lock ->(&req->rsk_timer) ->clock-AF_INET6 ->&net->xfrm.xfrm_policy_lock ->&policy->lock ->&list->lock#29 FD: 47 BD: 129 ++--: clock-AF_INET6 ->pool_lock#2 ->&c->lock ->&n->list_lock ->rds_tcp_tc_list_lock ->&cp->cp_lock ->&rm->m_rs_lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->&list->lock#26 ->tk_core.seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&sd->defer_lock ->&mux->rx_lock ->&lruvec->lru_lock FD: 139 BD: 1 .+.+: kn->active#46 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 310 BD: 69 ++++: dev_addr_sem ->net_rwsem ->&tn->lock ->&sdata->sec_mtx ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->rlock-AF_NETLINK ->rcu_node_0 ->&rq->__lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&tbl->lock ->&pn->hash_lock ->input_pool.lock ->&c->lock ->&____s->seqcount ->&br->lock ->&n->list_lock ->team->team_lock_key ->team->team_lock_key#2 ->remove_cache_srcu ->team->team_lock_key#3 ->team->team_lock_key#6 ->team->team_lock_key#5 ->team->team_lock_key#4 ->_xmit_ETHER ->&cfs_rq->removed.lock ->&hard_iface->bat_iv.ogm_buff_mutex ->quarantine_lock ->&____s->seqcount#2 FD: 888 BD: 2 +.+.: nlk_cb_mutex-GENERIC ->fs_reclaim ->&c->lock ->pool_lock#2 ->&____s->seqcount ->rtnl_mutex ->&rdev->wiphy.mtx ->rlock-AF_NETLINK ->&obj_hash[i].lock ->&devlink->lock_key ->&devlink->lock_key#2 ->&devlink->lock_key#3 ->&devlink->lock_key#4 ->&devlink->lock_key#5 ->&devlink->lock_key#6 ->genl_mutex ->&____s->seqcount#2 ->&rq->__lock ->&n->list_lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&lock->wait_lock ->genl_mutex.wait_lock FD: 21 BD: 88 +.-.: &rdev->bss_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->&zone->lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 105 BD: 1 +.-.: (&net->sctp.addr_wq_timer) ->&net->sctp.addr_wq_lock FD: 14 BD: 69 ++..: lapb_list_lock ->pool_lock#2 ->&obj_hash[i].lock 
->&base->lock ->&c->lock ->&n->list_lock FD: 1 BD: 69 ++.-: x25_neigh_list_lock FD: 1 BD: 69 +...: _xmit_SLIP FD: 15 BD: 1 +.-.: (&eql->timer) ->&eql->queue.lock ->&obj_hash[i].lock ->&base->lock FD: 4 BD: 72 +.-.: &eql->queue.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 69 +...: &vi->refill_lock FD: 68 BD: 3621 +.-.: _xmit_ETHER#2 ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&data->lock FD: 148 BD: 87 +.+.: &local->chanctx_mtx ->fs_reclaim ->pool_lock#2 ->&data->mutex ->&rq->__lock ->&local->queue_stop_reason_lock ->&obj_hash[i].lock ->krc.lock ->rcu_node_0 ->&c->lock ->nl_table_lock ->nl_table_wait.lock ->&rdev->bss_lock ->&n->list_lock ->&rcu_state.gp_wq ->&____s->seqcount#2 ->&____s->seqcount FD: 27 BD: 88 +.+.: &data->mutex ->&rq->__lock FD: 19 BD: 3633 +...: &local->filter_lock ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&____s->seqcount FD: 21 BD: 1 +.+.: (wq_completion)phy0 ->(work_completion)(&local->reconfig_filter) FD: 20 BD: 23 +.+.: (work_completion)(&local->reconfig_filter) ->&local->filter_lock FD: 83 BD: 70 +.-.: &dev->tx_global_lock ->_xmit_ETHER#2 ->&obj_hash[i].lock ->&base->lock ->&qdisc_xmit_lock_key ->_xmit_NONE#2 ->_xmit_LOOPBACK#2 ->&qdisc_xmit_lock_key#2 ->&batadv_netdev_xmit_lock_key ->_xmit_NETROM ->_xmit_IPGRE#2 ->&qdisc_xmit_lock_key#3 ->&vlan_netdev_xmit_lock_key ->_xmit_TUNNEL#2 ->&qdisc_xmit_lock_key#4 ->&qdisc_xmit_lock_key#5 FD: 41 BD: 3648 +.-.: &sch->q.lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->hrtimer_bases.lock ->&obj_hash[i].lock ->&q->current_entry_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->(console_sem).lock FD: 1 BD: 70 ....: class FD: 1 BD: 70 ....: (&tbl->proxy_timer) FD: 21 BD: 1 +.+.: (wq_completion)phy1 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 69 +...: _xmit_VOID FD: 1 BD: 69 +...: _xmit_X25 FD: 15 BD: 70 +...: &lapbeth->up_lock ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock FD: 72 BD: 70 +.-.: &lapb->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->&list->lock#17 ->&list->lock#18 ->&____s->seqcount#2 ->&n->list_lock FD: 2 BD: 156 +.+.: &(ei->i_block_reservation_lock) ->key#14 FD: 856 BD: 2 +.+.: (work_completion)(&work->work) ->devices_rwsem ->&obj_hash[i].lock ->quarantine_lock ->&rq->__lock FD: 832 BD: 2 +.+.: (work_completion)(&(&ifa->dad_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3700 +.-.: rt6_exception_lock FD: 1 BD: 95 ....: &____s->seqcount#8 FD: 17 BD: 3668 +.-.: &ul->lock ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 155 ....: &tty->ctrl.lock FD: 45 BD: 82 +.+.: fasync_lock ->&new->fa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &buf->lock FD: 1 BD: 7 ....: &tty->flow.lock FD: 77 BD: 69 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->_xmit_ETHER#2 ->_xmit_SLIP#2 ->_xmit_NETROM ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount ->&sch->q.lock ->&data->lock FD: 27 BD: 8 +.+.: &net->packet.sklist_lock ->&rq->__lock FD: 252 BD: 3 +.+.: sk_lock-AF_PACKET ->slock-AF_PACKET ->&po->bind_lock ->rcu_node_0 ->&obj_hash[i].lock ->&rnp->exp_wq[0] ->&rq->__lock ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->pcpu_alloc_mutex ->&c->lock ->&n->list_lock 
->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&rnp->exp_wq[1] ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&po->pg_vec_lock ->init_mm.page_table_lock ->&rcu_state.expedited_wq ->stock_lock ->&____s->seqcount#2 ->remove_cache_srcu FD: 29 BD: 4 +...: slock-AF_PACKET ->&sk->sk_lock.wq FD: 33 BD: 5 +.+.: &po->bind_lock ->ptype_lock ->pool_lock#2 ->&dir->lock#2 ->&match->lock ->&obj_hash[i].lock FD: 5 BD: 3747 +.-.: rlock-AF_PACKET ->tk_core.seq.seqcount FD: 1 BD: 1 +...: wlock-AF_PACKET FD: 265 BD: 4 +.+.: &ldata->atomic_read_lock ->&tty->termios_rwsem ->(work_completion)(&buf->work) ->&rq->__lock FD: 1 BD: 5 +.+.: (work_completion)(&buf->work) FD: 30 BD: 1 ..-.: &(&idev->mc_dad_work)->timer FD: 223 BD: 1 +.+.: (wq_completion)mld ->(work_completion)(&(&idev->mc_dad_work)->work) ->(work_completion)(&(&idev->mc_ifc_work)->work) ->&rq->__lock FD: 221 BD: 2 +.+.: (work_completion)(&(&idev->mc_dad_work)->work) ->&idev->mc_lock FD: 30 BD: 1 ..-.: &(&dm_bufio_cleanup_old_work)->timer FD: 16 BD: 1 +.+.: (wq_completion)dm_bufio_cache ->(work_completion)(&(&dm_bufio_cleanup_old_work)->work) FD: 15 BD: 2 +.+.: (work_completion)(&(&dm_bufio_cleanup_old_work)->work) ->dm_bufio_clients_lock ->&obj_hash[i].lock ->&base->lock FD: 30 BD: 1 ..-.: &(&idev->mc_ifc_work)->timer FD: 221 BD: 2 +.+.: (work_completion)(&(&idev->mc_ifc_work)->work) ->&idev->mc_lock FD: 17 BD: 3645 +.-.: &ul->lock#2 ->pool_lock#2 ->&dir->lock#2 ->&c->lock FD: 22 BD: 3662 ++--: &n->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&(&n->ha_lock)->lock ->&____s->seqcount#9 ->&c->lock ->&data->lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->&dir->lock ->stock_lock FD: 1 BD: 3672 +.--: &____s->seqcount#9 FD: 30 BD: 1 ..-.: &(&ifa->dad_work)->timer FD: 38 BD: 2 +.+.: (work_completion)(&w->work)#2 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->nf_conntrack_mutex ->nf_conntrack_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 1 BD: 3677 ...-: &____s->seqcount#10 FD: 1 BD: 122 +.-.: &ct->lock FD: 141 BD: 3 +.+.: fanout_mutex ->&rq->__lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&po->bind_lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 3 +...: clock-AF_PACKET FD: 1 BD: 3 ....: elock-AF_PACKET FD: 73 BD: 1 +.-.: (&lapb->t1timer) ->&lapb->lock FD: 84 BD: 1 +.-.: (&dev->watchdog_timer) ->&dev->tx_global_lock FD: 30 BD: 1 ..-.: drivers/regulator/core.c:6262 FD: 4 BD: 2 +.+.: (regulator_init_complete_work).work ->&k->list_lock ->&k->k_lock FD: 5 BD: 3663 +.-.: &nf_conntrack_locks[i] ->&nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 4 BD: 3664 +.-.: &nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 1 BD: 125 +.-.: &hashinfo->ehash_locks[i] FD: 2 BD: 3663 +.-.: &(&n->ha_lock)->lock ->&____s->seqcount#9 FD: 1 BD: 3658 +.-.: lock#8 FD: 1 BD: 3660 ..-.: id_table_lock FD: 1 BD: 109 ..-.: (&req->rsk_timer) FD: 1 BD: 109 +.-.: &queue->rskq_lock FD: 10 BD: 109 +.-.: tcp_metrics_lock ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 89 BD: 76 +.-.: slock-AF_INET/1 ->tk_core.seq.seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->&meta->lock 
->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET ->&zone->lock ->&sctp_ep_hashtable[i].lock ->clock-AF_INET ->&____s->seqcount#2 ->&data->lock ->key#23 ->krc.lock ->&sctp_port_hashtable[i].lock FD: 1 BD: 111 +.-.: &sd->defer_lock FD: 127 BD: 1 +.-.: (&icsk->icsk_delack_timer) ->slock-AF_INET ->slock-AF_INET6 ->k-slock-AF_INET6 FD: 127 BD: 1 +.-.: (&icsk->icsk_retransmit_timer) ->slock-AF_INET ->slock-AF_INET6 ->k-slock-AF_INET6 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->stock_lock FD: 1 BD: 100 ..-.: elock-AF_INET FD: 1 BD: 157 ....: key#14 FD: 85 BD: 155 +.+.: &sbi->s_orphan_lock ->&ei->i_raw_lock ->&rq->__lock ->&lock->wait_lock ->rcu_node_0 ->&ret->b_state_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&____s->seqcount ->bit_wait_table + i ->&mapping->private_lock FD: 1 BD: 4261 ....: key#15 FD: 139 BD: 1 .+.+: kn->active#47 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 +.+.: &futex_queues[i].lock FD: 1 BD: 4 ....: &on->poll FD: 1 BD: 5 +.+.: module_mutex FD: 3 BD: 77 +.+.: once_mutex ->crngs.lock FD: 236 BD: 2 .+.+: sb_writers#9 ->&attr->mutex ->&mm->mmap_lock FD: 235 BD: 3 +.+.: &attr->mutex ->&mm->mmap_lock FD: 153 BD: 1 +.+.: &type->s_umount_key#41/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&c->lock ->&n->list_lock ->&sb->s_type->i_lock_key#30 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 40 BD: 4271 +.+.: &sb->s_type->i_lock_key#30 ->&dentry->d_lock FD: 875 BD: 2 .+.+: sb_writers#10 ->mount_lock ->&type->i_mutex_dir_key#6 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#6/1 ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->stock_lock ->&rq->__lock ->&sb->s_type->i_mutex_key#15 ->iattr_mutex ->&c->lock ->&xattrs->lock ->&____s->seqcount ->&p->lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#30 ->&wb->list_lock FD: 117 BD: 3 ++++: &type->i_mutex_dir_key#6 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#30 ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->stock_lock ->&rq->__lock ->pool_lock#2 ->&obj_hash[i].lock ->&xa->xa_lock#13 FD: 139 BD: 1 ++++: kn->active#48 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->stock_lock ->&rq->__lock ->&c->lock FD: 139 BD: 3 +.+.: &sb->s_type->i_mutex_key#15 ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem FD: 153 BD: 1 +.+.: &type->s_umount_key#42/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 40 BD: 4271 +.+.: &sb->s_type->i_lock_key#31 ->&dentry->d_lock FD: 139 BD: 1 ++++: &type->s_umount_key#43 ->shrinker_rwsem ->percpu_ref_switch_lock ->&root->kernfs_supers_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#31 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->&lru->node[i].lock ->&rq->__lock ->&wb->list_lock FD: 841 BD: 2 +.+.: (work_completion)(&cgrp->bpf.release_work) ->cgroup_mutex ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 847 BD: 1 +.+.: (wq_completion)cgroup_destroy ->(work_completion)(&css->destroy_work) ->(work_completion)(&(&css->destroy_rwork)->work) FD: 841 BD: 2 +.+.: 
(work_completion)(&css->destroy_work) ->cgroup_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 845 BD: 2 +.+.: (work_completion)(&(&css->destroy_rwork)->work) ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 ->&cgrp->pidlist_mutex ->(wq_completion)cgroup_pidlist_destroy ->&wq->mutex ->(work_completion)(&cgrp->release_agent_work) ->cgroup_mutex ->cgroup_rstat_lock ->pcpu_lock ->&root->kernfs_rwsem ->kernfs_idr_lock FD: 140 BD: 12 +.+.: &cgrp->pidlist_mutex ->css_set_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->&base->lock FD: 142 BD: 3 +.+.: (wq_completion)cgroup_pidlist_destroy ->(work_completion)(&(&l->destroy_dwork)->work) FD: 1 BD: 3 +.+.: (work_completion)(&cgrp->release_agent_work) FD: 875 BD: 2 .+.+: sb_writers#11 ->mount_lock ->&type->i_mutex_dir_key#7 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#7/1 ->&rq->__lock ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->&c->lock ->stock_lock ->&sb->s_type->i_mutex_key#16 ->iattr_mutex ->&xattrs->lock ->pool_lock#2 ->&____s->seqcount ->&p->lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#31 ->&wb->list_lock ->&____s->seqcount#2 ->&lock->wait_lock ->&p->pi_lock FD: 117 BD: 3 ++++: &type->i_mutex_dir_key#7 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->pool_lock#2 ->&xa->xa_lock#13 ->&obj_hash[i].lock ->stock_lock ->&c->lock ->&n->list_lock ->&rq->__lock ->&____s->seqcount ->&____s->seqcount#2 FD: 1 BD: 18 +.+.: &dom->lock FD: 139 BD: 1 .+.+: kn->active#49 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 139 BD: 3 +.+.: &sb->s_type->i_mutex_key#16 ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem FD: 292 BD: 1 .+.+: kn->active#50 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock FD: 42 BD: 4 +.+.: &type->s_umount_key#44 ->sb_lock ->&dentry->d_lock FD: 156 BD: 3 +.+.: &sb->s_type->i_mutex_key#17 ->namespace_sem ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pin_fs_lock ->sb_lock ->&type->s_umount_key#44 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->mount_lock ->&obj_hash[i].lock ->entries_lock FD: 249 BD: 2 .+.+: sb_writers#12 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&sb->s_type->i_mutex_key#17 FD: 1 BD: 71 ++..: &pn->hash_lock FD: 53 BD: 1 +...: &net->ipv6.fib6_gc_lock ->&obj_hash[i].lock FD: 1 BD: 69 +...: _xmit_IEEE802154 FD: 32 BD: 3 ..-.: &ei->i_completed_io_lock FD: 183 BD: 1 +.+.: (wq_completion)ext4-rsv-conversion ->(work_completion)(&ei->i_rsv_conversion_work) FD: 182 BD: 2 +.+.: (work_completion)(&ei->i_rsv_conversion_work) ->&ei->i_completed_io_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->pool_lock#2 ->&ext4__ioend_wq[i] ->&ret->b_uptodate_lock ->&folio_wait_table[i] ->mmu_notifier_invalidate_range_start ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->rcu_node_0 ->batched_entropy_u8.lock ->quarantine_lock ->&rcu_state.expedited_wq ->&base->lock ->&lruvec->lru_lock ->&cfs_rq->removed.lock ->&pgdat->reclaim_wait[i] ->&c->lock FD: 28 BD: 158 ....: &journal->j_wait_reserved ->&p->pi_lock FD: 1 BD: 3 ....: &ext4__ioend_wq[i] FD: 137 BD: 8 +.+.: swap_cgroup_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 140 BD: 8 +.+.: swapon_mutex ->fs_reclaim ->pool_lock#2 ->swap_lock ->percpu_ref_switch_lock ->(console_sem).lock FD: 2 BD: 4251 
+.+.: &p->lock#2 ->swap_avail_lock FD: 1 BD: 4252 +.+.: swap_avail_lock FD: 1 BD: 8 ....: proc_poll_wait.lock FD: 291 BD: 1 +.+.: swap_slots_cache_enable_mutex ->cpu_hotplug_lock ->swap_lock FD: 1 BD: 4248 +.+.: swap_slots_cache_mutex FD: 28 BD: 166 ..-.: &rq_wait->wait ->&p->pi_lock FD: 31 BD: 1 +.-.: (&timer) ->&obj_hash[i].lock ->&base->lock ->&txlock ->&txwq FD: 1 BD: 3741 ..-.: &list->lock#5 FD: 7 BD: 70 +...: _xmit_SLIP#2 ->&eql->queue.lock FD: 41 BD: 72 +...: _xmit_NETROM ->(console_sem).lock ->console_owner_lock ->console_owner ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock FD: 18 BD: 1 +...: _xmit_X25#2 ->&lapbeth->up_lock FD: 91 BD: 158 +.+.: &lg->lg_mutex ->&ei->i_prealloc_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&mapping->private_lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&rq->__lock ->&c->lock ->remove_cache_srcu ->rcu_node_0 ->key#3 ->&bgl->locks[i].lock ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->&lock->wait_lock ->&cfs_rq->removed.lock ->bit_wait_table + i ->&____s->seqcount ->&dd->lock FD: 1 BD: 160 +.+.: &pa->pa_lock FD: 2 BD: 159 +.+.: &lg->lg_prealloc_lock ->&pa->pa_lock FD: 90 BD: 1 +.-.: (&n->timer) ->&n->lock ->pool_lock#2 ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&c->lock ->icmp_global.lock ->&dir->lock#2 ->&dir->lock ->stock_lock ->&____s->seqcount ->&ul->lock#2 ->&data->lock ->&n->list_lock ->quarantine_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 30 BD: 1 ..-.: net/wireless/reg.c:236 FD: 832 BD: 2 +.+.: (reg_check_chans).work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 30 BD: 1 ..-.: net/wireless/reg.c:533 FD: 832 BD: 2 +.+.: (crda_timeout).work ->rtnl_mutex FD: 95 BD: 1 +.-.: (&sk->sk_timer) ->slock-AF_INET FD: 1 BD: 197 ....: &newf->resize_wait FD: 3 BD: 144 ..-.: &kcov->lock ->kcov_remote_lock FD: 167 BD: 1 +.+.: pid_caches_mutex ->slab_mutex FD: 42 BD: 1 +.+.: &type->s_umount_key#45 ->sb_lock ->&dentry->d_lock FD: 150 BD: 2 ++++: &sb->s_type->i_mutex_key#18 ->namespace_sem ->&dentry->d_lock ->tk_core.seq.seqcount FD: 1 BD: 24 ++++: hci_sk_list.lock FD: 1 BD: 1 +.+.: (work_completion)(&(&data->open_timeout)->work) FD: 315 BD: 1 +.+.: &data->open_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->&x->wait#9 ->hci_index_ida.xa_lock ->cpu_hotplug_lock ->wq_pool_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&n->list_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#81 ->&dev->devres_lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&rfkill->lock ->hci_dev_list_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->(pm_chain_head).rwsem ->&list->lock#8 ->&data->read_wait ->uevent_sock_mutex.wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 2 ....: hci_index_ida.xa_lock FD: 3 BD: 14 +.+.: subsys mutex#81 ->&k->k_lock FD: 1 BD: 6 ++++: hci_dev_list_lock FD: 146 BD: 1 +.+.: (wq_completion)hci0 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 143 BD: 7 +.+.: (work_completion)(&hdev->power_on) ->&hdev->req_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock FD: 37 BD: 9 +.+.: &hdev->req_lock ->&obj_hash[i].lock ->&list->lock#6 ->pool_lock#2 
->&list->lock#7 ->&hdev->req_wait_q ->&base->lock ->&rq->__lock ->&c->lock ->&n->list_lock ->(&timer.timer) ->batched_entropy_u8.lock ->kfence_freelist_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&____s->seqcount ->&cfs_rq->removed.lock FD: 1 BD: 25 ....: &list->lock#6 FD: 1 BD: 10 ....: &list->lock#7 FD: 28 BD: 17 ....: &hdev->req_wait_q ->&p->pi_lock FD: 1 BD: 10 ....: &list->lock#8 FD: 28 BD: 10 ....: &data->read_wait ->&p->pi_lock FD: 295 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->sock_cookie_ida.xa_lock ->&p->alloc_lock ->pool_lock#2 ->&c->lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->clock-AF_BLUETOOTH ->&____s->seqcount ->mgmt_chan_list_lock ->&rq->__lock ->&data->lock FD: 1 BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_HCI FD: 1 BD: 4 ....: sock_cookie_ida.xa_lock FD: 146 BD: 1 +.+.: (wq_completion)hci1 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 302 BD: 1 +.+.: (wq_completion)hci0#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 141 BD: 7 +.+.: (work_completion)(&hdev->cmd_work) ->&list->lock#6 ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->&list->lock#8 ->&data->read_wait ->&obj_hash[i].lock ->&rq->__lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->&data->lock FD: 302 BD: 1 +.+.: (wq_completion)hci1#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 293 BD: 7 +.+.: (work_completion)(&hdev->rx_work) ->&list->lock#6 ->lock#6 ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&rq->__lock ->&hdev->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&obj_hash[i].lock ->&hdev->req_wait_q ->&c->lock ->&base->lock ->&n->list_lock ->chan_list_lock ->remove_cache_srcu FD: 290 BD: 12 +.+.: &hdev->lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&k->k_lock ->subsys mutex#81 ->&list->lock#6 ->&hdev->unregister_lock ->hci_cb_list_lock ->&base->lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&n->list_lock ->&rq->__lock ->&conn->chan_lock ->(console_sem).lock ->rlock-AF_BLUETOOTH ->&lock->wait_lock ->sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->&data->lock FD: 146 BD: 1 +.+.: (wq_completion)hci2 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 302 BD: 1 +.+.: (wq_completion)hci2#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 138 BD: 13 +.+.: &hdev->unregister_lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&hdev->cmd_sync_work_lock ->&c->lock FD: 1 BD: 21 +.+.: &hdev->cmd_sync_work_lock FD: 39 BD: 7 +.+.: (work_completion)(&hdev->cmd_sync_work) ->&hdev->cmd_sync_work_lock ->&hdev->req_lock 
->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 14 +.+.: &conn->ident_lock FD: 1 BD: 15 ....: &list->lock#9 FD: 43 BD: 16 +.+.: &conn->chan_lock ->&rq->__lock ->&chan->lock/1 FD: 32 BD: 7 +.+.: (work_completion)(&hdev->tx_work) ->&list->lock#9 ->tk_core.seq.seqcount ->&list->lock#8 ->&data->read_wait ->&list->lock#6 FD: 2 BD: 7 +.+.: (work_completion)(&conn->pending_rx_work) ->&list->lock#10 FD: 1 BD: 8 ....: &list->lock#10 FD: 146 BD: 1 +.+.: (wq_completion)hci3 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 1 BD: 18 +...: clock-AF_BLUETOOTH FD: 1 BD: 18 ....: rlock-AF_BLUETOOTH FD: 1 BD: 18 ....: wlock-AF_BLUETOOTH FD: 302 BD: 1 +.+.: (wq_completion)hci3#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 27 BD: 1 +.+.: &sb->s_type->i_mutex_key#19 ->&rq->__lock FD: 1 BD: 1 +.+.: &undo_list->lock FD: 1 BD: 69 +...: &nr_netdev_addr_lock_key FD: 1 BD: 69 +...: listen_lock FD: 146 BD: 1 +.+.: (wq_completion)hci4 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 302 BD: 1 +.+.: (wq_completion)hci4#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 2 BD: 12 +.+.: rdma_nets.xa_lock ->pool_lock#2 FD: 146 BD: 1 +.+.: (wq_completion)hci5 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 302 BD: 1 +.+.: (wq_completion)hci5#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 1 BD: 4 +.+.: &____s->seqcount#11 FD: 2 BD: 3 +.+.: &(&net->ipv4.ping_group_range.lock)->lock ->&____s->seqcount#11 FD: 2 BD: 69 +.+.: &r->consumer_lock ->&r->producer_lock FD: 1 BD: 3600 +.-.: &r->producer_lock FD: 19 BD: 3594 +...: &bridge_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&____s->seqcount ->&____s->seqcount#2 ->&zone->lock FD: 35 BD: 75 +.-.: &br->hash_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&n->list_lock ->&____s->seqcount#2 ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock FD: 139 BD: 70 +.+.: j1939_netdev_lock ->&rq->__lock ->fs_reclaim ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&net->can.rcvlists_lock ->&obj_hash[i].lock ->&priv->lock FD: 19 BD: 3589 +...: &dev_addr_list_lock_key#2 ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock FD: 10 BD: 69 +...: &bat_priv->tvlv.handler_list_lock ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 14 BD: 76 +...: &bat_priv->tvlv.container_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&____s->seqcount ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 19 BD: 3591 +...: &batadv_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 10 BD: 74 +...: &bat_priv->softif_vlan_list_lock ->pool_lock#2 ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock 
->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 3603 +...: key#16 FD: 6 BD: 3602 +...: &bat_priv->tt.changes_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount FD: 32 BD: 1 ..-.: &(&bat_priv->nc.work)->timer FD: 65 BD: 1 +.+.: (wq_completion)bat_events ->(work_completion)(&(&bat_priv->nc.work)->work) ->(work_completion)(&(&bat_priv->mcast.work)->work) ->(work_completion)(&(&bat_priv->orig_work)->work) ->(work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->(work_completion)(&(&bat_priv->tt.work)->work) ->&rq->__lock ->(work_completion)(&(&bat_priv->dat.work)->work) ->(work_completion)(&(&bat_priv->bla.work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 33 BD: 2 +.+.: (work_completion)(&(&bat_priv->nc.work)->work) ->key#17 ->key#18 ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&cfs_rq->removed.lock FD: 1 BD: 3 +...: key#17 FD: 1 BD: 3 +...: key#18 FD: 167 BD: 70 +.+.: init_lock ->slab_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->crngs.lock FD: 1 BD: 3599 +.-.: deferred_lock FD: 832 BD: 2 +.+.: deferred_process_work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 69 ....: target_list_lock FD: 50 BD: 72 +.-.: &br->lock ->&br->hash_lock ->lweventlist_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->nl_table_lock ->nl_table_wait.lock ->&br->multicast_lock ->&c->lock ->&____s->seqcount ->quarantine_lock ->&n->list_lock ->&____s->seqcount#2 FD: 161 BD: 1 +.+.: (wq_completion)bond0 ->(work_completion)(&(&slave->notify_work)->work) FD: 160 BD: 3633 +.+.: (work_completion)(&(&slave->notify_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->stock_lock FD: 1 BD: 3573 +.+.: &bond->stats_lock/1 FD: 32 BD: 1 ..-.: &(&slave->notify_work)->timer FD: 32 BD: 1 ..-.: &(&bat_priv->mcast.work)->timer FD: 37 BD: 2 +.+.: (work_completion)(&(&bat_priv->mcast.work)->work) ->pool_lock#2 ->&bat_priv->mcast.mla_lock ->&obj_hash[i].lock ->&base->lock ->kfence_freelist_lock ->&meta->lock ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->quarantine_lock FD: 33 BD: 3 +.+.: &bat_priv->mcast.mla_lock ->pool_lock#2 ->key#16 ->&bat_priv->tt.changes_list_lock ->&bat_priv->tvlv.container_list_lock ->&c->lock ->&n->list_lock FD: 292 BD: 70 +.+.: team->team_lock_key ->fs_reclaim ->pool_lock#2 ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&____s->seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->&rq->__lock ->&bridge_netdev_addr_lock_key/2 ->&macvlan_netdev_addr_lock_key/3 ->&idev->mc_lock ->pcpu_alloc_mutex ->&tb->tb6_lock ->(inet6addr_validator_chain).rwsem ->stock_lock ->pcpu_lock ->&ifa->lock ->&n->list_lock ->&dev_addr_list_lock_key#2/4 FD: 161 BD: 1 +.+.: (wq_completion)bond0#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond0#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 40 BD: 72 +.+.: &hard_iface->bat_iv.ogm_buff_mutex ->crngs.lock ->pool_lock#2 ->batched_entropy_u8.lock ->&bat_priv->forw_bat_list_lock 
->&obj_hash[i].lock ->&c->lock ->rcu_node_0 ->&rq->__lock ->kfence_freelist_lock ->&____s->seqcount ->&bat_priv->tt.commit_lock ->&bat_priv->tvlv.container_list_lock ->&____s->seqcount#2 ->&n->list_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 14 BD: 73 +...: &bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&base->lock FD: 192 BD: 70 +.+.: team->team_lock_key#2 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&____s->seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->(console_sem).lock ->pool_lock#2 ->lweventlist_lock FD: 32 BD: 1 ..-.: &(&bat_priv->orig_work)->timer FD: 31 BD: 2 +.+.: (work_completion)(&(&bat_priv->orig_work)->work) ->key#19 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->pool_lock#2 ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3 +...: key#19 FD: 32 BD: 1 ..-.: &(&hdev->cmd_timer)->timer FD: 30 BD: 1 ..-.: drivers/net/wireguard/ratelimiter.c:20 FD: 43 BD: 7 +.+.: (work_completion)(&(&hdev->cmd_timer)->work) ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 2 +.+.: (gc_work).work ->tk_core.seq.seqcount ->"ratelimiter_table_lock" ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 3 +.+.: "ratelimiter_table_lock" FD: 1 BD: 69 +...: _xmit_NONE FD: 1 BD: 69 +...: lock#9 FD: 161 BD: 1 +.+.: (wq_completion)bond0#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 70 ...-: &____s->seqcount#12 FD: 192 BD: 70 +.+.: team->team_lock_key#3 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount ->pool_lock#2 ->&n->list_lock ->lweventlist_lock ->(console_sem).lock ->&rq->__lock FD: 161 BD: 1 +.+.: (wq_completion)bond0#5 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 3592 +.-.: &hsr->list_lock FD: 161 BD: 1 +.+.: (wq_completion)bond0#6 ->(work_completion)(&(&slave->notify_work)->work) FD: 192 BD: 70 +.+.: team->team_lock_key#4 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->lweventlist_lock ->(console_sem).lock ->&rq->__lock ->&n->list_lock ->pool_lock#2 FD: 293 BD: 70 +.+.: team->team_lock_key#5 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->&n->list_lock ->&br->lock ->&base->lock ->&bridge_netdev_addr_lock_key ->&idev->mc_lock ->pcpu_alloc_mutex ->&tb->tb6_lock ->(inet6addr_validator_chain).rwsem ->stock_lock ->pcpu_lock ->&ifa->lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->&dev_addr_list_lock_key#2/1 FD: 192 BD: 70 +.+.: 
team->team_lock_key#6 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&n->list_lock ->lweventlist_lock ->(console_sem).lock ->&rq->__lock ->pool_lock#2 FD: 8 BD: 3588 +...: &vlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 25 BD: 69 +.-.: (&app->join_timer) ->&app->lock ->&list->lock#11 FD: 16 BD: 71 +.-.: &app->lock ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&list->lock#11 ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 72 ..-.: &list->lock#11 FD: 32 BD: 1 ..-.: &(&forw_packet_aggr->delayed_work)->timer FD: 48 BD: 2 +.+.: (work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->&hard_iface->bat_iv.ogm_buff_mutex ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&c->lock ->&n->list_lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->rcu_node_0 ->quarantine_lock ->&cfs_rq->removed.lock ->&base->lock ->&rcu_state.expedited_wq FD: 21 BD: 69 +.-.: (&app->join_timer)#2 ->&app->lock#2 ->&list->lock#12 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock FD: 11 BD: 70 +.-.: &app->lock#2 ->pool_lock#2 ->&list->lock#12 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock FD: 1 BD: 71 ..-.: &list->lock#12 FD: 9 BD: 3588 +...: &macvlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 FD: 2 BD: 3588 +...: &dev_addr_list_lock_key#3 ->pool_lock#2 FD: 1 BD: 69 ....: &xa->xa_lock#14 FD: 8 BD: 3588 +...: &dev_addr_list_lock_key#3/1 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 2 BD: 69 +.+.: &tap_major->minor_lock ->pool_lock#2 FD: 3 BD: 69 +.+.: subsys mutex#82 ->&k->k_lock FD: 851 BD: 1 .+.+: kn->active#51 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock ->&c->lock ->&____s->seqcount FD: 849 BD: 9 +.+.: nsim_bus_dev_list_lock ->fs_reclaim ->pool_lock#2 ->nsim_bus_dev_ids.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->nsim_bus_dev_list_lock.wait_lock ->subsys mutex#83 ->&sem->wait_lock FD: 851 BD: 1 .+.+: kn->active#52 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock ->&rq->__lock ->&c->lock FD: 1 BD: 10 ....: nsim_bus_dev_ids.xa_lock FD: 2 BD: 18 +.+.: devlinks.xa_lock ->pool_lock#2 FD: 836 BD: 12 +.+.: &devlink->lock_key ->crngs.lock ->fs_reclaim ->pool_lock#2 ->devlinks.xa_lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->&rq->__lock ->&n->list_lock ->quarantine_lock ->remove_cache_srcu ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 2 BD: 18 
+.+.: &xa->xa_lock#15 ->pool_lock#2 FD: 2 BD: 1 +.-.: (&tun->flow_gc_timer) ->&tun->lock FD: 1 BD: 70 +.-.: &tun->lock FD: 1 BD: 3701 +...: &data->fib_event_queue_lock FD: 143 BD: 2 +.+.: (work_completion)(&data->fib_event_work) ->&data->fib_event_queue_lock ->&data->fib_lock ->&rq->__lock FD: 141 BD: 3 +.+.: &data->fib_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&____s->seqcount ->&n->list_lock ->pool_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->rcu_node_0 ->quarantine_lock ->&rcu_state.expedited_wq FD: 1 BD: 18 ....: &(&fn_net->fib_chain)->lock FD: 1 BD: 69 +...: &devlink_port->type_lock FD: 30 BD: 1 ..-.: &(&nsim_dev->trap_data->trap_report_dw)->timer FD: 37 BD: 2 +.+.: (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 10 +.+.: nsim_bus_dev_list_lock.wait_lock FD: 139 BD: 69 +.+.: bpf_devs_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 69 +.+.: (work_completion)(&(&devlink_port->type_warn_dw)->work) FD: 1 BD: 69 +.+.: &vn->sock_lock FD: 30 BD: 1 ..-.: &(&hwstats->traffic_dw)->timer FD: 29 BD: 2 +.+.: (work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 28 BD: 70 +.+.: &hwstats->hwsdev_list_lock ->rcu_node_0 ->&rq->__lock FD: 17 BD: 69 +.-.: (&app->periodic_timer) ->&app->lock FD: 1 BD: 10 +.+.: subsys mutex#83 FD: 837 BD: 12 +.+.: &devlink->lock_key#2 ->crngs.lock ->fs_reclaim ->&c->lock ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->&____s->seqcount ->pool_lock#2 ->&n->list_lock ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 154 BD: 69 +.+.: devnet_rename_sem ->(console_sem).lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->&root->kernfs_rwsem ->kernfs_rename_lock ->&c->lock ->&____s->seqcount ->uevent_sock_mutex ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 243 ....: kernfs_rename_lock FD: 308 BD: 71 +.+.: &nft_net->commit_mutex ->&rq->__lock ->fs_reclaim ->stock_lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&____s->seqcount ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_node_0 ->&rnp->exp_wq[1] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->(work_completion)(&ht->run_work) ->&ht->mutex ->&rnp->exp_wq[2] ->&base->lock ->(work_completion)(&(&priv->gc_work)->work) ->rcu_state.barrier_mutex ->rcu_state.barrier_mutex.wait_lock ->&____s->seqcount#2 ->(console_sem).lock ->console_owner_lock ->console_owner ->&lock->wait_lock ->nf_ct_proto_mutex ->(work_completion)(&(&priv->gc_work)->work)#2 ->remove_cache_srcu ->quarantine_lock ->nl_table_lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->&p->alloc_lock ->&rnp->exp_wq[0] ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->defrag4_mutex ->tk_core.seq.seqcount FD: 32 BD: 20 +.+.: &nsim_trap_data->trap_lock ->pool_lock#2 ->crngs.lock 
->&nsim_dev->fa_cookie_lock ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&base->lock ->&pgdat->kswapd_wait FD: 1 BD: 21 +...: &nsim_dev->fa_cookie_lock FD: 457 BD: 69 +.+.: &wg->device_update_lock ->&wg->static_identity.lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->pcpu_alloc_mutex ->&handshake->lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&table->lock ->&peer->endpoint_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->cpu_hotplug_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&wg->socket_update_lock ->&rq->__lock ->&list->lock#14 ->&pool->lock/1 ->&____s->seqcount#2 ->&n->list_lock ->stock_lock ->remove_cache_srcu ->&wq->mutex ->wq_pool_mutex ->wq_mayday_lock ->&p->pi_lock ->&x->wait ->pcpu_lock ->&r->consumer_lock#2 ->rcu_state.barrier_mutex ->init_lock ->&zone->lock FD: 140 BD: 126 ++++: &wg->static_identity.lock ->&handshake->lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 139 BD: 128 ++++: &handshake->lock ->crngs.lock ->tk_core.seq.seqcount ->&table->lock#2 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&rq->__lock ->&____s->seqcount#2 ->remove_cache_srcu ->&n->list_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 70 +.+.: &table->lock FD: 72 BD: 129 ++-.: &peer->endpoint_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 30 BD: 1 ..-.: &(&conn->info_timer)->timer FD: 44 BD: 2 +.+.: (work_completion)(&(&conn->info_timer)->work) ->&conn->chan_lock FD: 837 BD: 12 +.+.: &devlink->lock_key#3 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&rq->__lock ->pool_lock#2 ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 69 +...: _xmit_SIT FD: 7 BD: 3588 +...: &bridge_netdev_addr_lock_key/1 ->pool_lock#2 ->&c->lock FD: 39 BD: 69 +.-.: (&brmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 38 BD: 3598 +.-.: &br->multicast_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->&c->lock ->&n->list_lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->quarantine_lock ->&meta->lock ->init_task.mems_allowed_seq.seqcount FD: 39 BD: 69 +.-.: (&brmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 1 BD: 69 +...: _xmit_TUNNEL FD: 19 BD: 3591 +...: _xmit_IPGRE ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 74 BD: 1 +.-.: (&in_dev->mr_ifc_timer) ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&base->lock FD: 837 BD: 12 +.+.: &devlink->lock_key#4 ->crngs.lock ->fs_reclaim ->&c->lock ->&n->list_lock ->devlinks.xa_lock ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->&rq->__lock ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->pool_lock#2 
->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 30 BD: 1 ..-.: &(&br->gc_work)->timer FD: 37 BD: 70 +.+.: (work_completion)(&(&br->gc_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 7 BD: 69 +...: _xmit_TUNNEL6 ->&c->lock ->pool_lock#2 FD: 59 BD: 3589 +.-.: _xmit_TUNNEL6#2 ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&____s->seqcount ->&data->lock FD: 837 BD: 12 +.+.: &devlink->lock_key#5 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->&c->lock ->&xa->xa_lock#15 ->&____s->seqcount ->pool_lock#2 ->pcpu_alloc_mutex ->&n->list_lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->nl_table_wait.lock ->stack_depot_init_mutex ->&rq->__lock ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 46 BD: 3590 +...: &dev_addr_list_lock_key/1 ->_xmit_ETHER ->&batadv_netdev_addr_lock_key ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&bridge_netdev_addr_lock_key ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 46 BD: 3590 +...: &dev_addr_list_lock_key#2/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->pool_lock#2 ->_xmit_IPGRE ->&obj_hash[i].lock ->krc.lock ->&bridge_netdev_addr_lock_key FD: 39 BD: 1 +.-.: (&pmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 39 BD: 1 +.-.: (&pmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 837 BD: 12 +.+.: &devlink->lock_key#6 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->&____s->seqcount ->pcpu_alloc_mutex ->&n->list_lock ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->remove_cache_srcu ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 3588 +...: _xmit_ETHER/1 FD: 25 BD: 1 +.-.: (&hsr->announce_timer) FD: 23 BD: 3590 +.-.: &hsr->seqnr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&meta->lock ->kfence_freelist_lock ->&data->lock ->quarantine_lock FD: 1 BD: 3591 +.-.: &new_node->seq_out_lock FD: 15 BD: 1 +.-.: (&hsr->prune_timer) ->&hsr->list_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 69 +.+.: &nn->netlink_tap_lock FD: 7 BD: 3588 +...: &batadv_netdev_addr_lock_key/1 ->&c->lock ->&____s->seqcount FD: 32 BD: 1 ..-.: &(&bat_priv->tt.work)->timer FD: 34 BD: 2 +.+.: (work_completion)(&(&bat_priv->tt.work)->work) ->key#16 ->key#20 ->&bat_priv->tt.req_list_lock ->&bat_priv->tt.roam_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 3 +...: key#20 FD: 1 BD: 3 +...: &bat_priv->tt.req_list_lock FD: 1 BD: 3 +...: &bat_priv->tt.roam_list_lock FD: 44 BD: 3588 +...: &vlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock FD: 107 BD: 1 +.-.: (&ndev->rs_timer) ->&ndev->lock ->pool_lock#2 ->&dir->lock#2 ->&ul->lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount ->key#29 FD: 44 BD: 
3589 +...: &macvlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 1 BD: 70 +.-.: &list->lock#13 FD: 31 BD: 69 +.+.: (work_completion)(&port->bc_work) ->&list->lock#13 ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&rq->__lock FD: 19 BD: 70 +...: &ipvlan->addrs_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->krc.lock FD: 63 BD: 3589 +...: &macsec_netdev_addr_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->pool_lock#2 ->(console_sem).lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->krc.lock FD: 44 BD: 69 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->&sch->q.lock FD: 1 BD: 3591 +.-.: key#21 FD: 20 BD: 73 +...: &bat_priv->tt.commit_lock ->key#16 ->&bat_priv->softif_vlan_list_lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tt.last_changeset_lock ->pool_lock#2 ->&bat_priv->tvlv.container_list_lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 27 BD: 70 +.+.: &wg->socket_update_lock ->&rq->__lock FD: 18 BD: 119 +.-.: &list->lock#14 ->&data->lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg0 ->(work_completion)(&peer->transmit_handshake_work) FD: 138 BD: 19 +.+.: (work_completion)(&peer->transmit_handshake_work) ->tk_core.seq.seqcount ->&wg->static_identity.lock ->&cookie->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&peer->endpoint_lock ->batched_entropy_u8.lock ->&rq->__lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->rcu_node_0 ->kfence_freelist_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 130 +...: &table->lock#2 FD: 28 BD: 57 ++++: &cookie->lock ->&rq->__lock ->rcu_node_0 FD: 140 BD: 1 +.+.: (wq_completion)wg-kex-wg1 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 1 BD: 117 +.-.: &r->producer_lock#2 FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg0#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 175 BD: 37 +.+.: (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->&r->consumer_lock#2 ->&wg->static_identity.lock ->&peer->endpoint_lock ->tk_core.seq.seqcount ->&cookie->lock ->&handshake->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&rq->__lock ->&list->lock#14 ->rcu_node_0 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 107 +.+.: &r->consumer_lock#2 FD: 5 BD: 129 +.-.: &peer->keypairs.keypair_update_lock ->&table->lock#2 ->&obj_hash[i].lock ->pool_lock#2 FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg1#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg1 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 31 BD: 19 +.+.: (work_completion)(&peer->transmit_packet_work) ->&obj_hash[i].lock ->&peer->endpoint_lock ->&base->lock ->batched_entropy_u8.lock ->&rq->__lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg0 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 1 BD: 1 +.-.: &keypair->receiving_counter.lock FD: 136 BD: 10 +.+.: &data->mtx ->fs_reclaim ->remove_cache_srcu ->pool_lock#2 ->&rfkill->lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 140 BD: 1 +.+.: (wq_completion)wg-kex-wg2 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg2#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 3591 +.-.: &entry->crc_lock FD: 1 BD: 80 ....: &wdev->event_lock FD: 1 BD: 73 +.+.: (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) FD: 48 BD: 81 +.+.: &local->key_mtx ->&rq->__lock ->&obj_hash[i].lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 30 BD: 88 ..-.: &rdev->wiphy_work_lock FD: 2 BD: 74 +...: &bat_priv->tt.last_changeset_lock ->pool_lock#2 FD: 1 BD: 73 ....: (&dwork->timer) FD: 1 BD: 73 +.+.: (work_completion)(&(&link->color_collision_detect_work)->work) FD: 334 BD: 2 +.+.: (work_completion)(&rdev->wiphy_work) ->&rdev->wiphy.mtx ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 21 BD: 1 +.+.: (wq_completion)phy3 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 81 ..-.: &list->lock#15 FD: 1 BD: 80 +.-.: &ifibss->incomplete_lock FD: 164 BD: 85 +.+.: &local->mtx ->&local->chanctx_mtx ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->&c->lock ->&local->ack_status_lock ->&local->queue_stop_reason_lock ->&n->list_lock ->&rq->__lock ->&data->mutex ->&local->iflist_mtx ->&local->filter_lock ->&obj_hash[i].lock ->&base->lock ->batched_entropy_u8.lock 
->kfence_freelist_lock ->&rdev->wiphy_work_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&____s->seqcount#2 ->remove_cache_srcu ->&lock->wait_lock ->nl_table_lock ->nl_table_wait.lock FD: 834 BD: 1 +.+.: (wq_completion)cfg80211 ->(work_completion)(&rdev->event_work) ->(work_completion)(&(&rdev->dfs_update_channels_wk)->work) FD: 334 BD: 2 +.+.: (work_completion)(&rdev->event_work) ->&rdev->wiphy.mtx ->&lock->wait_lock ->&p->pi_lock FD: 158 BD: 2 +.+.: wireless_nlevent_work ->net_rwsem FD: 167 BD: 1 +.+.: (wq_completion)phy4 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->scan_work)->work) FD: 1 BD: 3640 ..-.: &list->lock#16 FD: 33 BD: 1 +.-.: &local->rx_path_lock ->&list->lock#15 ->&rdev->wiphy_work_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rdev->mgmt_registrations_lock FD: 19 BD: 89 +...: &sta->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 19 BD: 80 +.-.: &sta->rate_ctrl_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock FD: 171 BD: 80 +.+.: &local->sta_mtx ->fs_reclaim ->pool_lock#2 ->&local->chanctx_mtx ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&obj_hash[i].lock ->nl_table_lock ->&____s->seqcount ->nl_table_wait.lock ->&c->lock ->&____s->seqcount#2 ->&fq->lock ->&local->active_txq_lock[i] ->(work_completion)(&sta->drv_deliver_wk) ->&sta->lock ->krc.lock ->remove_cache_srcu ->&n->list_lock ->&rq->__lock ->&sta->ampdu_mlme.mtx ->(work_completion)(&sta->ampdu_mlme.work) ->&local->key_mtx ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->mount_lock ->quarantine_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 152 BD: 1 +.+.: &type->s_umount_key#46/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#32 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->binderfs_minors_mutex ->&dentry->d_lock ->&sb->s_type->i_mutex_key#20 ->&____s->seqcount#2 ->&rq->__lock FD: 40 BD: 3 +.+.: &sb->s_type->i_lock_key#32 ->&dentry->d_lock FD: 2 BD: 2 +.+.: binderfs_minors_mutex ->binderfs_minors.xa_lock FD: 1 BD: 3 ....: binderfs_minors.xa_lock FD: 140 BD: 2 +.+.: &sb->s_type->i_mutex_key#20 ->&sb->s_type->i_lock_key#32 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock FD: 1 BD: 3 +.+.: iunique_lock FD: 812 BD: 3 +.+.: &type->i_mutex_dir_key#6/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->&xa->xa_lock#13 ->&obj_hash[i].lock ->stock_lock ->&c->lock ->&n->list_lock ->&rq->__lock ->&sb->s_type->i_lock_key#30 FD: 1 BD: 18 ....: task_group_lock FD: 139 BD: 1 .+.+: kn->active#53 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount#2 ->&n->list_lock FD: 139 BD: 1 ++++: kn->active#54 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 3 BD: 136 ..-.: cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&obj_hash[i].lock FD: 28 BD: 136 ....: cgroup_threadgroup_rwsem.waiters.lock ->&p->pi_lock FD: 1 BD: 19 +.+.: (wq_completion)cpuset_migrate_mm FD: 812 BD: 3 +.+.: &type->i_mutex_dir_key#7/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss 
->&root->kernfs_iattr_rwsem ->cgroup_mutex ->pool_lock#2 ->&xa->xa_lock#13 ->&obj_hash[i].lock ->stock_lock ->cgroup_mutex.wait_lock ->&p->pi_lock ->&sb->s_type->i_lock_key#31 ->&c->lock FD: 139 BD: 1 ++++: kn->active#55 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&____s->seqcount FD: 1 BD: 137 ....: cpuset_attach_wq.lock FD: 2 BD: 4297 ..-.: stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 139 BD: 1 .+.+: kn->active#56 ->&rq->__lock ->fs_reclaim ->stock_lock ->&c->lock ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&____s->seqcount#2 ->&n->list_lock FD: 140 BD: 1 .+.+: kn->active#57 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->memcg_max_mutex ->&c->lock FD: 1 BD: 8 +.+.: memcg_max_mutex FD: 1 BD: 3631 +.-.: &local->active_txq_lock[i] FD: 37 BD: 3622 +.-.: &local->handle_wake_tx_queue_lock ->&local->active_txq_lock[i] ->&local->queue_stop_reason_lock ->&fq->lock ->tk_core.seq.seqcount ->hwsim_radio_lock ->&list->lock#16 FD: 1 BD: 3638 ..-.: &local->queue_stop_reason_lock FD: 1 BD: 6 ....: &per_cpu(xt_recseq, i) FD: 291 BD: 1 +.+.: nf_nat_proto_mutex ->fs_reclaim ->pool_lock#2 ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rq->__lock FD: 1 BD: 94 ..-.: elock-AF_INET6 FD: 30 BD: 1 +.+.: loop_validate_mutex ->&lo->lo_mutex ->&rq->__lock ->loop_validate_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 237 BD: 69 +.+.: &net->xdp.lock ->&rq->__lock ->&xs->mutex ->&lock->wait_lock ->&p->pi_lock FD: 1 BD: 1 +.+.: crypto_cfg_mutex FD: 1 BD: 3664 +.-.: &nf_nat_locks[i] FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg0#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 3 +...: &xs->map_list_lock FD: 236 BD: 70 +.+.: &xs->mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->&c->lock ->init_mm.page_table_lock ->umem_ida.xa_lock ->&rq->__lock ->&mm->mmap_lock ->&sem->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&lock->wait_lock ->&n->list_lock ->&____s->seqcount#2 ->remove_cache_srcu FD: 1 BD: 3 +...: clock-AF_XDP FD: 1 BD: 82 +...: &msk->pm.lock FD: 524 BD: 4 +.+.: (work_completion)(&msk->work) ->sk_lock-AF_INET6 ->slock-AF_INET6 ->sk_lock-AF_INET ->slock-AF_INET ->&rq->__lock FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg1#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg0#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg1#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr 
+ (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg2#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg2#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 1 BD: 4 +.+.: genl_mutex.wait_lock FD: 38 BD: 1 +.-.: (&peer->timer_persistent_keepalive) ->pool_lock#2 ->&list->lock#14 ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount ->&zone->lock FD: 21 BD: 1 +.+.: (wq_completion)phy5 ->(work_completion)(&local->reconfig_filter) FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg0#5 ->(work_completion)(&peer->transmit_handshake_work) FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg1#5 ->(work_completion)(&peer->transmit_handshake_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg0#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg1#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
->(work_completion)(&peer->transmit_packet_work) FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg2#5 ->(work_completion)(&peer->transmit_handshake_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg2#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 168 BD: 1 +.+.: (wq_completion)phy6 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->scan_work)->work) ->(work_completion)(&(&local->roc_work)->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 4 BD: 2 +.-.: icmp_global.lock ->batched_entropy_u8.lock FD: 28 BD: 122 ....: &sk->sk_lock.wq ->&p->pi_lock FD: 193 BD: 1 +.+.: (wq_completion)phy7 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&link->csa_finalize_work) FD: 1 BD: 71 ..-.: &list->lock#17 FD: 1 BD: 71 ..-.: &list->lock#18 FD: 1 BD: 1 ..-.: &list->lock#19 FD: 1 BD: 4 +.-.: x25_list_lock FD: 1 BD: 1 +.-.: x25_forward_list_lock FD: 140 BD: 1 +.+.: (wq_completion)wg-kex-wg0#7 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 140 BD: 1 +.+.: (wq_completion)wg-kex-wg0#8 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 140 BD: 1 +.+.: (wq_completion)wg-kex-wg1#7 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) ->&rq->__lock FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg0#9 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg1#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 140 BD: 1 +.+.: 
(wq_completion)wg-kex-wg2#7 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg2#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 13 BD: 238 +...: link_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 194 BD: 1 +.+.: (wq_completion)phy8 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->scan_work)->work) ->(work_completion)(&link->csa_finalize_work) FD: 140 BD: 1 +.+.: (wq_completion)wg-kex-wg1#9 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg0#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg1#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 140 BD: 1 +.+.: (wq_completion)wg-kex-wg2#9 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg2#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) 
+ 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 27 BD: 81 +.+.: (work_completion)(&sta->drv_deliver_wk) ->&rq->__lock FD: 1 BD: 3613 .+.-: &table->lock#3 FD: 193 BD: 1 +.+.: (wq_completion)phy9 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&link->csa_finalize_work) FD: 21 BD: 1 +.+.: (wq_completion)phy10 ->(work_completion)(&local->reconfig_filter) FD: 12 BD: 91 +...: &sctp_port_hashtable[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 82 ....: &asoc->wait FD: 366 BD: 69 +.+.: __ip_vs_mutex ->&rq->__lock ->(console_sem).lock ->&ipvs->dest_trash_lock ->&mm->mmap_lock ->ip_vs_sched_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->&c->lock ->&____s->seqcount ->pcpu_alloc_mutex ->&n->list_lock ->__ip_vs_mutex.wait_lock ->ipvs->est_mutex ->rcu_node_0 ->&rcu_state.expedited_wq FD: 43 BD: 101 ...-: &f->f_owner.lock FD: 195 BD: 1 +.+.: (wq_completion)phy11 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&link->csa_finalize_work) ->(work_completion)(&(&local->scan_work)->work) ->(work_completion)(&ifmgd->monitor_work) FD: 21 BD: 1 +.+.: (wq_completion)phy12 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 18 +.+.: cgroup_mutex.wait_lock FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg0#11 ->(work_completion)(&peer->transmit_handshake_work) FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg1#11 ->(work_completion)(&peer->transmit_handshake_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg0#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg1#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 139 BD: 1 +.+.: (wq_completion)wg-kex-wg2#11 ->(work_completion)(&peer->transmit_handshake_work) FD: 176 BD: 1 +.+.: (wq_completion)wg-kex-wg2#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 177 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 21 BD: 1 +.+.: (wq_completion)phy13 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 5 +.+.: &pnsocks.lock FD: 27 BD: 4 +.+.: resource_mutex ->&rq->__lock FD: 1 BD: 3 +...: clock-AF_PHONET FD: 1 BD: 3 ....: rlock-AF_PHONET FD: 199 BD: 75 +.+.: sk_lock-AF_INET6/1 ->slock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#20 ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&____s->seqcount ->krc.lock ->rcu_node_0 ->&rq->__lock ->&c->lock ->&____s->seqcount#2 ->fs_reclaim ->tk_core.seq.seqcount ->&list->lock#21 ->sctp_assocs_id_lock ->&n->list_lock ->&rcu_state.expedited_wq ->remove_cache_srcu ->&cfs_rq->removed.lock ->quarantine_lock ->&data->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&zone->lock FD: 1 BD: 78 +.-.: rlock-AF_INET6 FD: 1 BD: 78 ....: &list->lock#20 FD: 65 BD: 76 +.-.: slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&sctp_port_hashtable[i].lock ->clock-AF_INET6 ->tk_core.seq.seqcount ->&f->f_owner.lock ->&base->lock ->&c->lock ->key#23 ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&zone->lock ->quarantine_lock ->&data->lock ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 85 ++.-: &sctp_ep_hashtable[i].lock FD: 167 BD: 1 +.+.: (wq_completion)phy14 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->scan_work)->work) FD: 1 BD: 4259 +.+.: &pa->pa_lock#2 FD: 32 BD: 1 ..-.: &(&bat_priv->dat.work)->timer FD: 32 BD: 1 ..-.: &(&bat_priv->bla.work)->timer FD: 31 BD: 2 +.+.: (work_completion)(&(&bat_priv->dat.work)->work) ->&hash->list_locks[i] ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 3 +...: &hash->list_locks[i] FD: 34 BD: 2 +.+.: (work_completion)(&(&bat_priv->bla.work)->work) ->&rq->__lock ->key#21 ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->crngs.lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 1 BD: 1 ....: _rs.lock FD: 139 BD: 70 +.+.: &block->lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 141 BD: 69 ++++: &block->cb_lock ->&rq->__lock ->flow_indr_block_lock ->&tp->lock FD: 137 BD: 70 +.+.: flow_indr_block_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&rq->__lock ->&n->list_lock FD: 1 BD: 71 +...: &qdisc_xmit_lock_key FD: 200 BD: 139 .+.+: sb_pagefaults ->tk_core.seq.seqcount ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->mapping.invalidate_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->remove_cache_srcu ->&journal->j_wait_transaction_locked ->batched_entropy_u8.lock ->kfence_freelist_lock 
->&meta->lock ->rcu_node_0 ->&n->list_lock ->&sb->s_type->i_lock_key#22 ->&cfs_rq->removed.lock ->&sem->wait_lock ->&p->pi_lock FD: 1 BD: 3 +...: l2tp_ip_lock FD: 1 BD: 92 ..-.: key#22 FD: 20 BD: 73 +.-.: _xmit_NONE#2 ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock ->&____s->seqcount ->quarantine_lock FD: 13 BD: 239 +...: sctp_assocs_id_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 1 BD: 93 ..-.: &list->lock#21 FD: 1 BD: 4 +.+.: &q->instances_lock FD: 1 BD: 4 +...: &log->instances_lock FD: 1 BD: 137 +.+.: freezer_mutex.wait_lock FD: 97 BD: 3 +.+.: sk_lock-AF_INET/1 ->slock-AF_INET ->rlock-AF_INET ->&list->lock#20 ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->krc.lock ->&cfs_rq->removed.lock FD: 1 BD: 4 ....: rlock-AF_INET FD: 76 BD: 1 +.-.: (&peer->timer_retransmit_handshake) ->&peer->endpoint_lock ->&obj_hash[i].lock ->&list->lock#14 FD: 296 BD: 8 +.+.: tracepoints_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->cpu_hotplug_lock ->tracepoint_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->&c->lock ->tasklist_lock ->tracepoint_srcu ->&x->wait#3 ->&n->list_lock ->tracepoints_mutex.wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&____s->seqcount#2 FD: 1 BD: 148 +.+.: text_mutex.wait_lock FD: 1 BD: 141 +.+.: jump_label_mutex.wait_lock FD: 1 BD: 1 ....: _rs.lock#2 FD: 179 BD: 150 +.+.: &journal->j_barrier ->&journal->j_state_lock ->jbd2_handle ->&journal->j_wait_commit ->&rq->__lock ->&journal->j_wait_done_commit ->&journal->j_list_lock ->&journal->j_checkpoint_mutex ->&lock->wait_lock FD: 166 BD: 1 +.+.: &net->xfrm.xfrm_cfg_mutex ->(console_sem).lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->crypto_alg_sem ->(kmod_concurrent_max).lock ->&x->wait#17 ->&lock->wait_lock ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->(&timer.timer) ->crypto_default_null_skcipher_lock ->&net->xfrm.xfrm_state_lock ->&n->list_lock ->remove_cache_srcu ->pfkey_mutex ->rlock-AF_KEY ->pfkey_mutex.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&net->xfrm.xfrm_policy_lock ->&policy->lock ->&list->lock#29 ->&data->lock ->xfrm_state_gc_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->running_helpers_waitq.lock ->crypto_default_rng_lock ->&drbg->drbg_mutex ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 15 BD: 15 ....: tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) ->&base->lock FD: 30 BD: 3 +.-.: (&sdp->delay_work) FD: 1 BD: 3 +...: base_sockets.lock FD: 1 BD: 4 +...: clock-AF_ISDN FD: 297 BD: 3 +.+.: sched_register_mutex ->tracepoints_mutex FD: 36 BD: 1 +.-.: (&peer->timer_send_keepalive) ->pool_lock#2 ->&list->lock#14 ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock FD: 33 BD: 2 +.-.: &q->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&c->lock ->&data->lock FD: 1 BD: 69 +.+.: mirred_list_lock FD: 10 BD: 69 +...: &idev->mc_query_lock ->&obj_hash[i].lock FD: 27 BD: 69 +.+.: (work_completion)(&(&idev->mc_report_work)->work) ->&rq->__lock FD: 1 BD: 69 +...: &idev->mc_report_lock FD: 28 BD: 69 +.+.: &pnn->pndevs.lock ->&rq->__lock ->pool_lock#2 FD: 27 BD: 69 +.+.: &pnn->routes.lock ->&rq->__lock FD: 1 BD: 7 ....: netdev_unregistering_wq.lock FD: 1 BD: 1 ....: _rs.lock#3 FD: 1 BD: 71 +...: _xmit_LOOPBACK#2 FD: 1 BD: 70 +...: &ipvs->dest_trash_lock FD: 1 BD: 69 +.+.: flowtable_lock FD: 1 BD: 3 +.+.: 
&net->ipv4.ra_mutex FD: 1 BD: 5 +.+.: nf_sockopt_mutex.wait_lock FD: 28 BD: 150 ....: &sem->waiters ->&p->pi_lock FD: 1 BD: 75 +.+.: tcpv6_prot_mutex FD: 1 BD: 75 +...: device_spinlock FD: 141 BD: 8 +.+.: crypto_default_null_skcipher_lock ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 18 BD: 70 +...: &net->xfrm.xfrm_state_lock ->hrtimer_bases.lock ->&obj_hash[i].lock ->&base->lock FD: 51 BD: 1 +.-.: (&p->forward_delay_timer) ->&br->lock FD: 43 BD: 70 +.+.: (work_completion)(&br->mcast_gc_work) ->&br->multicast_lock ->(&p->rexmit_timer) ->&obj_hash[i].lock ->&base->lock ->(&p->timer) ->pool_lock#2 ->krc.lock ->(&mp->timer) ->&rq->__lock FD: 1 BD: 71 ....: (&p->rexmit_timer) FD: 39 BD: 71 +.-.: (&p->timer) ->&br->multicast_lock FD: 39 BD: 71 +.-.: (&mp->timer) ->&br->multicast_lock FD: 1 BD: 69 ....: (&pmctx->ip6_mc_router_timer) FD: 1 BD: 69 ....: (&pmctx->ip4_mc_router_timer) FD: 1 BD: 3 +...: clock-AF_NETROM FD: 40 BD: 3 +.+.: sk_lock-AF_NETROM ->slock-AF_NETROM ->&obj_hash[i].lock ->wlock-AF_NETROM ->&list->lock#22 ->nr_list_lock ->rlock-AF_NETROM ->ax25_uid_lock ->pool_lock#2 ->&list->lock#35 ->&base->lock ->&rq->__lock ->&data->lock ->&ei->socket.wq.wait ->&c->lock ->&n->list_lock FD: 37 BD: 6 +.-.: slock-AF_NETROM ->&c->lock ->pool_lock#2 ->&list->lock#35 ->&obj_hash[i].lock ->&base->lock ->&data->lock ->rlock-AF_NETROM ->wlock-AF_NETROM ->&list->lock#22 FD: 1 BD: 7 ..-.: wlock-AF_NETROM FD: 1 BD: 7 ..-.: &list->lock#22 FD: 1 BD: 6 +.-.: nr_list_lock FD: 1 BD: 7 ..-.: rlock-AF_NETROM FD: 1 BD: 8 +.-.: &x->lock FD: 2 BD: 7 +.-.: (&x->rtimer) ->&x->lock FD: 1 BD: 82 ....: fastopen_seqlock.seqcount FD: 142 BD: 137 +.+.: &sb->s_type->i_mutex_key#21 ->tk_core.seq.seqcount ->fs_reclaim ->pool_lock#2 ->hugetlb_lock ->&rq->__lock ->&____s->seqcount ->&c->lock ->&n->list_lock FD: 32 BD: 139 +.+.: &hugetlbfs_i_mmap_rwsem_key ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 ->ptlock_ptr(page) ->&____s->seqcount FD: 141 BD: 137 +.+.: &hugetlb_fault_mutex_table[i] ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&mm->page_table_lock ->&resv_map->lock ->hugetlb_lock ->&rq->__lock ->&anon_vma->rwsem ->ptlock_ptr(page) ->&c->lock ->&vma_lock->rw_sema ->&sb->s_type->i_lock_key#16 FD: 4 BD: 139 +.+.: &resv_map->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 4 +...: smc_v4_hashinfo.lock FD: 300 BD: 3 +.+.: sk_lock-AF_SMC ->slock-AF_SMC ->k-sk_lock-AF_INET ->k-slock-AF_INET ->smc_v4_hashinfo.lock ->clock-AF_SMC ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_SMC FD: 72 BD: 1 .+.+: sb_writers#13 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#16 ->&wb->list_lock FD: 1 BD: 3 +.+.: (work_completion)(&smc->connect_work) FD: 828 BD: 3 +.+.: &smc->clcsock_release_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->stock_lock ->&rq->__lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->nf_sockopt_mutex ->&mm->mmap_lock ->rtnl_mutex ->&c->lock ->__ip_vs_mutex ->__ip_vs_mutex.wait_lock ->&p->pi_lock ->fs_reclaim ->ebt_mutex ->(kmod_concurrent_max).lock ->&x->wait#17 ->running_helpers_waitq.lock FD: 1 BD: 4 +...: clock-AF_SMC FD: 139 BD: 138 ++++: &vma_lock->rw_sema ->&hugetlbfs_i_mmap_rwsem_key ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&mm->page_table_lock ->&resv_map->lock ->hugetlb_lock ->&xa->xa_lock#7 ->&sb->s_type->i_lock_key#16 ->ptlock_ptr(page) ->&c->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->remove_cache_srcu ->&n->list_lock FD: 6 BD: 139 +.+.: &po->pg_vec_lock ->rlock-AF_PACKET FD: 
1 BD: 7 ..-.: rlock-AF_RXRPC FD: 47 BD: 3595 +.-.: &bond->mode_lock ->(console_sem).lock ->&c->lock ->pool_lock#2 FD: 31 BD: 73 +.+.: (work_completion)(&(&bond->mii_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 51 BD: 69 +.+.: (work_completion)(&(&bond->ad_work)->work) ->&bond->mode_lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 FD: 1 BD: 1 ....: net_ratelimit_state.lock FD: 32 BD: 1 ..-.: &(&bond->mii_work)->timer FD: 1 BD: 71 +...: &qdisc_xmit_lock_key#2 FD: 30 BD: 74 +.+.: (work_completion)(&(&bond->arp_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 51 BD: 73 +.+.: (work_completion)(&(&bond->alb_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 27 BD: 69 +.+.: (work_completion)(&(&bond->slave_arr_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 229 BD: 3570 +.+.: (work_completion)(&(&bond->mcast_work)->work) ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&p->pi_lock ->rtnl_mutex.wait_lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->stock_lock ->&rcu_state.expedited_wq ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 71 +...: &batadv_netdev_xmit_lock_key FD: 1 BD: 3 +...: clock-AF_RXRPC FD: 1 BD: 1 +...: &bat_priv->gw.list_lock FD: 1 BD: 1 +...: &bat_priv->forw_bcast_list_lock FD: 1 BD: 1 +.+.: (work_completion)(&(&bat_priv->bat_v.ogm_wq)->work) FD: 27 BD: 1 +.+.: &bat_priv->bat_v.ogm_buff_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 28 BD: 70 ....: &tfile->socket.wq.wait ->&p->pi_lock FD: 27 BD: 3 +.+.: (work_completion)(&strp->work) ->&rq->__lock FD: 1 BD: 69 ....: wlock-AF_UNSPEC FD: 1 BD: 69 ....: elock-AF_UNSPEC FD: 1 BD: 1 +.+.: &mq_lock FD: 132 BD: 2 +.+.: free_ipc_work ->rcu_node_0 ->&obj_hash[i].lock ->&pool->lock ->&rq->__lock ->mount_lock ->&fsnotify_mark_srcu ->&type->s_umount_key#47 ->unnamed_dev_ida.xa_lock ->list_lrus_mutex ->&xa->xa_lock#13 ->pool_lock#2 ->sb_lock ->mnt_id_ida.xa_lock ->&ids->rwsem ->(work_completion)(&ht->run_work) ->&ht->mutex ->percpu_counters_lock ->pcpu_lock ->sysctl_lock ->proc_inum_ida.xa_lock ->stock_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->quarantine_lock FD: 137 BD: 3 +.+.: &type->s_umount_key#47 ->shrinker_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 1 BD: 3 +.+.: &ids->rwsem FD: 980 BD: 1 +.+.: (wq_completion)netns ->net_cleanup_work FD: 979 BD: 2 +.+.: net_cleanup_work ->pernet_ops_rwsem ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&dir->lock ->stock_lock ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rq->__lock FD: 1 BD: 5 +...: &net->nsid_lock FD: 1 BD: 5 +...: &tn->node_list_lock FD: 27 BD: 5 +.+.: netns_bpf_mutex ->&rq->__lock FD: 1 BD: 5 ....: (&net->fs_probe_timer) FD: 1 BD: 7 ++++: &net->cells_lock FD: 1 BD: 5 ....: (&net->cells_timer) FD: 33 BD: 1 +.+.: (wq_completion)afs ->(work_completion)(&net->cells_manager) ->(work_completion)(&net->fs_manager) FD: 30 BD: 2 +.+.: (work_completion)(&net->cells_manager) ->&net->cells_lock ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 5 ....: (&net->fs_timer) FD: 30 BD: 2 +.+.: (work_completion)(&net->fs_manager) ->&(&net->fs_lock)->lock 
->bit_wait_table + i ->&rq->__lock FD: 1 BD: 3 +.+.: &(&net->fs_lock)->lock FD: 1 BD: 6 +.+.: &rx->incoming_lock FD: 1 BD: 6 +.+.: &call->notify_lock FD: 1 BD: 6 ....: (rxrpc_call_limiter).lock FD: 1 BD: 6 +.+.: &rx->recvmsg_lock FD: 1 BD: 6 ....: (&call->timer) FD: 1 BD: 6 ....: &list->lock#23 FD: 1 BD: 5 +.+.: (wq_completion)kafsd FD: 1 BD: 5 +...: k-clock-AF_RXRPC FD: 1 BD: 1 ....: (&local->client_conn_reap_timer) FD: 1 BD: 1 ....: &list->lock#24 FD: 1 BD: 9 +.+.: (work_completion)(&data->gc_work) FD: 1 BD: 5 +.+.: (work_completion)(&ovs_net->dp_notify_work) FD: 1 BD: 5 +...: &srv->idr_lock FD: 1 BD: 5 ....: (&rxnet->service_conn_reap_timer) FD: 1 BD: 76 +.+.: rcu_state.barrier_mutex.wait_lock FD: 1 BD: 10 +...: &nt->cluster_scope_lock FD: 1 BD: 5 +.+.: (work_completion)(&tn->work) FD: 1 BD: 5 +.+.: (work_completion)(&(&c->work)->work) FD: 1 BD: 5 +.+.: &fn->fou_lock FD: 231 BD: 5 +.+.: (wq_completion)krdsd ->(work_completion)(&rtn->rds_tcp_accept_w) ->(work_completion)(&(&cp->cp_send_w)->work) ->(work_completion)(&(&cp->cp_recv_w)->work) ->(work_completion)(&cp->cp_down_w) FD: 225 BD: 6 +.+.: (work_completion)(&rtn->rds_tcp_accept_w) ->fs_reclaim ->&c->lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&obj_hash[i].lock ->once_lock ->&rq->__lock ->rcu_node_0 ->&____s->seqcount#2 ->&____s->seqcount ->rds_cong_lock ->&n->list_lock ->rds_trans_sem ->&tc->t_conn_path_lock ->&xa->xa_lock#7 ->&fsnotify_mark_srcu FD: 1 BD: 7 ....: rds_tcp_conn_lock FD: 1 BD: 5 ....: loop_conns_lock FD: 14 BD: 5 +.+.: (wq_completion)l2tp ->(work_completion)(&tunnel->del_work) FD: 2 BD: 8 +.+.: (work_completion)(&rxnet->service_conn_reaper) ->&rxnet->conn_lock FD: 1 BD: 79 ....: key#23 FD: 303 BD: 5 +.+.: ipvs->sync_mutex ->&mm->mmap_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&n->list_lock ->&rq->__lock ->&dir->lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&ipvs->sync_buff_lock ->&zone->lock ->&____s->seqcount ->&____s->seqcount#2 ->rtnl_mutex.wait_lock FD: 838 BD: 11 ++++: rdma_nets_rwsem ->rdma_nets.xa_lock ->&device->compat_devs_mutex ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 5 +...: k-clock-AF_NETLINK FD: 1 BD: 5 +.+.: &hn->hn_lock FD: 50 BD: 69 +.+.: &caifn->caifdevs.lock ->&obj_hash[i].lock ->&rnp->exp_wq[0] ->&rq->__lock ->pool_lock#2 ->&this->info_list_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&rnp->exp_wq[2] ->quarantine_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 144 BD: 1 +.+.: (wq_completion)inet_frag_wq ->(work_completion)(&fqdir->destroy_work) FD: 143 BD: 2 +.+.: (work_completion)(&fqdir->destroy_work) ->(work_completion)(&ht->run_work) ->&ht->mutex FD: 44 BD: 2 +.+.: fqdir_free_work ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->&base->lock ->quarantine_lock FD: 1 BD: 71 +...: &this->info_list_lock FD: 1 BD: 5 +.+.: &pnetids_ndev->lock FD: 207 BD: 75 +.+.: k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#20 ->&rq->__lock ->pool_lock#2 ->&dir->lock ->fs_reclaim ->&c->lock ->&obj_hash[i].lock ->slock-AF_INET6 ->k-clock-AF_INET6 ->&h->lhash2[i].lock ->&queue->rskq_lock ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 49 BD: 76 +.-.: k-slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock 
->&obj_hash[i].lock ->pool_lock#2 ->k-clock-AF_INET6 ->tk_core.seq.seqcount ->clock-AF_INET6 ->&base->lock ->&c->lock ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->&____s->seqcount ->&n->list_lock ->key#23 FD: 1 BD: 3 +...: rds_sock_lock FD: 1 BD: 3 +...: clock-AF_RDS FD: 1 BD: 3 ....: &rs->rs_recv_lock FD: 1 BD: 3 ....: rds_cong_monitor_lock FD: 1 BD: 10 ....: rds_cong_lock FD: 1 BD: 3 ....: &rs->rs_lock FD: 1 BD: 3 ....: &rs->rs_rdma_lock FD: 1 BD: 3 ....: &q->lock#2 FD: 30 BD: 1 ..-.: &(&net->ipv6.addr_chk_work)->timer FD: 72 BD: 1 .+.+: sb_writers#14 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#27 ->&wb->list_lock FD: 1 BD: 5 +.+.: &sn->gssp_lock FD: 1 BD: 8 +.+.: &cd->hash_lock FD: 156 BD: 6 +.+.: xfrm_state_gc_work ->xfrm_state_gc_lock ->&obj_hash[i].lock ->(&x->rtimer) ->&base->lock ->pool_lock#2 ->&rnp->exp_lock ->rcu_state.exp_mutex ->crypto_default_null_skcipher_lock FD: 1 BD: 5 +...: ip6_fl_lock FD: 1 BD: 5 ....: (&net->ipv6.ip6_fib_timer) FD: 1 BD: 69 ....: (&mrt->ipmr_expire_timer) FD: 1 BD: 5 ....: (&ipvs->dest_trash_timer) FD: 1 BD: 5 +.+.: (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) FD: 145 BD: 6 +.+.: (work_completion)(&(&ipvs->est_reload_work)->work) ->ipvs->est_mutex FD: 1 BD: 6 +...: recent_lock FD: 143 BD: 5 +.+.: hashlimit_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&rq->__lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->&base->lock ->&ent->pde_unload_lock ->&n->list_lock FD: 37 BD: 6 +.+.: (work_completion)(&(&cnet->ecache.dwork)->work) ->&cnet->ecache.dying_lock ->&obj_hash[i].lock ->pool_lock#2 ->&c->lock ->&rq->__lock ->&n->list_lock ->&base->lock FD: 1 BD: 5 +.+.: (work_completion)(&net->xfrm.policy_hash_work) FD: 34 BD: 94 +...: &net->xfrm.xfrm_policy_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount#13 ->krc.lock ->&c->lock FD: 1 BD: 5 +.+.: (work_completion)(&net->xfrm.state_hash_work) FD: 1 BD: 71 +...: &qdisc_xmit_lock_key#3 FD: 1 BD: 4840 .-.-: init_task.mems_allowed_seq.seqcount FD: 2 BD: 135 +.+.: (work_completion)(flush) ->&list->lock#5 FD: 1 BD: 4 +...: slock-AF_PHONET FD: 3 BD: 4 +.+.: port_mutex#2 ->local_port_range_lock.seqcount ->&pnsocks.lock FD: 1 BD: 5 ....: local_port_range_lock.seqcount FD: 1 BD: 3 ....: &list->lock#25 FD: 1 BD: 82 +...: &token_hash[i].lock FD: 19 BD: 1 +...: &nr_netdev_xmit_lock_key ->nr_node_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock ->quarantine_lock FD: 1 BD: 75 +...: ip6_sk_fl_lock FD: 1 BD: 71 +...: _xmit_IPGRE#2 FD: 137 BD: 1 ++++: kn->active#58 ->fs_reclaim ->remove_cache_srcu ->stock_lock ->&rq->__lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 41 BD: 3 +.+.: sk_lock-AF_PHONET ->&rq->__lock ->slock-AF_PHONET ->port_mutex#2 ->&pnsocks.lock ->resource_mutex ->&obj_hash[i].lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 67 BD: 3588 +...: &macvlan_netdev_addr_lock_key/2 ->&dev_addr_list_lock_key/1 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&c->lock ->&macsec_netdev_addr_lock_key/1 FD: 1 BD: 6 +...: l2tp_ip6_lock FD: 1 BD: 2 +...: nr_node_list_lock FD: 2 BD: 9 +.+.: &id_priv->handler_mutex ->&id_priv->lock FD: 1 BD: 9 ....: &x->wait#27 FD: 2 BD: 7 ....: rds_conn_lock ->rds_cong_lock FD: 49 BD: 7 +.+.: &tc->t_conn_path_lock ->clock-AF_INET6 ->&rq->__lock ->&cp->cp_lock ->pool_lock#2 FD: 1 BD: 130 +...: rds_tcp_tc_list_lock FD: 1 BD: 104 ..-.: &cp->cp_lock FD: 209 BD: 6 +.+.: 
(work_completion)(&(&cp->cp_send_w)->work) ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&c->lock ->pool_lock#2 ->&rq->__lock ->&obj_hash[i].lock ->&cp->cp_lock ->&n->list_lock FD: 163 BD: 1 +.+.: list_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&obj_hash[i].lock ->&base->lock ->(&info->timer->timer) ->(work_completion)(&info->timer->work) ->&rq->__lock ->list_mutex.wait_lock FD: 1 BD: 104 ..-.: &rm->m_rs_lock FD: 209 BD: 6 +.+.: (work_completion)(&(&cp->cp_recv_w)->work) ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 FD: 1 BD: 104 ..-.: &list->lock#26 FD: 1 BD: 2 ....: (&info->timer->timer) FD: 1 BD: 2 +.+.: (work_completion)(&info->timer->work) FD: 1 BD: 110 ....: key#24 FD: 1 BD: 2 +.+.: list_mutex.wait_lock FD: 1 BD: 4 +.+.: smcd_dev_list.mutex FD: 1 BD: 1 +...: clock-AF_LLC FD: 207 BD: 6 +.+.: (work_completion)(&cp->cp_down_w) ->&cp->cp_cm_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&rq->__lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&cp->cp_lock ->(work_completion)(&(&cp->cp_conn_w)->work) ->&list->lock#26 ->&data->lock FD: 247 BD: 3 +.+.: sk_lock-AF_LLC ->slock-AF_LLC ->llc_sap_list_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&dir->lock#2 ->&sap->sk_lock ->wlock-AF_LLC ->&obj_hash[i].lock ->&base->lock ->&ei->socket.wq.wait ->&rq->__lock ->quarantine_lock ->&mm->mmap_lock ->&____s->seqcount#2 ->&____s->seqcount ->&data->lock FD: 1 BD: 7 +.+.: &cp->cp_cm_lock FD: 29 BD: 4 +...: slock-AF_LLC ->&sk->sk_lock.wq FD: 27 BD: 7 +.+.: (work_completion)(&(&cp->cp_conn_w)->work) ->&rq->__lock FD: 1 BD: 4 +.-.: &sap->sk_lock FD: 1 BD: 4 ....: wlock-AF_LLC FD: 1 BD: 3 ....: (&llc->rej_sent_timer.timer) FD: 1 BD: 3 ....: (&llc->pf_cycle_timer.timer) FD: 1 BD: 3 ....: (&llc->ack_timer.timer) FD: 1 BD: 3 ....: (&llc->busy_state_timer.timer) FD: 1 BD: 3 ....: rlock-AF_LLC FD: 1 BD: 3 ....: &list->lock#27 FD: 146 BD: 4 +.+.: pfkey_mutex ->&rq->__lock ->crypto_alg_sem ->(kmod_concurrent_max).lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&x->wait#17 ->running_helpers_waitq.lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->(&timer.timer) ->&n->list_lock ->&cfs_rq->removed.lock ->pfkey_mutex.wait_lock ->&____s->seqcount#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 4 ....: rlock-AF_KEY FD: 1 BD: 69 +...: &bond->ipsec_lock FD: 1 BD: 3 +...: clock-AF_KEY FD: 1 BD: 3 ....: wlock-AF_KEY FD: 161 BD: 1 +.+.: (wq_completion)bond4 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 2 +.+.: vlan_ioctl_mutex.wait_lock FD: 1 BD: 5 +.+.: pfkey_mutex.wait_lock FD: 1 BD: 71 +...: &vlan_netdev_xmit_lock_key FD: 1 BD: 3 +.+.: nfnl_grp_active_lock FD: 1 BD: 8 +.+.: nf_conntrack_mutex.wait_lock FD: 247 BD: 69 +.+.: sk_lock-AF_UNSPEC ->slock-AF_UNSPEC ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->stock_lock ->pcpu_alloc_mutex ->&mm->mmap_lock ->&obj_hash[i].lock ->pack_mutex ->batched_entropy_u32.lock ->&rq->__lock ->text_mutex ->&fp->aux->used_maps_mutex ->&c->lock ->&n->list_lock ->init_mm.page_table_lock FD: 33 BD: 9 +...: &cnet->ecache.dying_lock FD: 1 BD: 3 +...: clock-AF_ROSE FD: 33 BD: 3 +.+.: sk_lock-AF_ROSE ->slock-AF_ROSE ->rose_list_lock ->&obj_hash[i].lock ->&rq->__lock ->wlock-AF_ROSE ->&list->lock#28 ->rlock-AF_ROSE ->rose_node_list_lock FD: 1 BD: 4 +...: slock-AF_ROSE FD: 1 BD: 4 ....: wlock-AF_ROSE FD: 1 BD: 4 
....: &list->lock#28 FD: 1 BD: 4 +...: rose_list_lock FD: 1 BD: 4 ....: rlock-AF_ROSE FD: 30 BD: 1 ..-.: &(&cnet->ecache.dwork)->timer FD: 1 BD: 69 +...: _xmit_NETROM#2 FD: 1 BD: 70 +...: slock-AF_UNSPEC FD: 1 BD: 1 ....: _rs.lock#4 FD: 47 BD: 69 +...: &vlan_netdev_addr_lock_key/2 ->&dev_addr_list_lock_key#2/1 FD: 297 BD: 69 +.+.: bpf_dispatcher_xdp.mutex ->pack_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->bpf_lock ->text_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 1 BD: 3590 +.-.: &r->producer_lock#3 FD: 1 BD: 1 +.-.: &r->producer_lock#4 FD: 1 BD: 95 +...: &____s->seqcount#13 FD: 1 BD: 95 ++.-: &policy->lock FD: 1 BD: 94 ....: &list->lock#29 FD: 29 BD: 2 +.+.: (work_completion)(&(&hinfo->gc_work)->work) ->&hinfo->lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->rcu_node_0 FD: 1 BD: 3 +...: &hinfo->lock FD: 248 BD: 6 +.+.: sk_lock-AF_TIPC ->slock-AF_TIPC ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->clock-AF_TIPC ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&list->lock#31 ->&c->lock ->&n->list_lock ->&____s->seqcount ->&ei->socket.wq.wait ->&zone->lock ->rcu_node_0 ->&sem->wait_lock ->&p->pi_lock ->quarantine_lock ->remove_cache_srcu ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&list->lock#38 ->&data->lock ->tk_core.seq.seqcount ->&list->lock#5 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 34 BD: 7 +...: slock-AF_TIPC ->&list->lock#31 ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock FD: 1 BD: 7 +...: clock-AF_TIPC FD: 34 BD: 1 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&ei->socket.wq.wait FD: 42 BD: 17 +.+.: &chan->lock/1 ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->chan_list_lock ->rcu_node_0 ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->clock-AF_BLUETOOTH ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->pool_lock#2 ->&dir->lock ->quarantine_lock ->&cfs_rq->removed.lock FD: 30 BD: 18 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->&rq->__lock ->slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 1 BD: 1 +.+.: bpf_module_mutex FD: 12 BD: 86 ..-.: &local->ack_status_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 75 BD: 1 +.-.: (&peer->timer_new_handshake) ->&peer->endpoint_lock FD: 4 BD: 7 +...: vsock_table_lock ->batched_entropy_u32.lock FD: 33 BD: 7 +.+.: (work_completion)(&(&conn->disc_work)->work) ->pool_lock#2 ->&list->lock#6 ->&c->lock ->&n->list_lock FD: 250 BD: 5 +.+.: sk_lock-AF_VSOCK ->&rq->__lock ->slock-AF_VSOCK ->vsock_table_lock ->clock-AF_VSOCK ->rlock-AF_VSOCK ->fs_reclaim ->pool_lock#2 ->&vvs->rx_lock ->&data->lock ->&obj_hash[i].lock ->&mm->mmap_lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&list->lock#41 ->&pool->lock ->&dir->lock ->sk_lock-AF_VSOCK/1 ->&ei->socket.wq.wait ->&vvs->tx_lock ->&zone->lock ->&base->lock ->&cfs_rq->removed.lock FD: 29 BD: 7 +...: slock-AF_VSOCK ->&sk->sk_lock.wq FD: 1 BD: 7 +...: clock-AF_VSOCK FD: 1 BD: 7 ....: rlock-AF_VSOCK FD: 22 BD: 2 +.+.: (work_completion)(&nlk->work) ->&obj_hash[i].lock ->pool_lock#2 ->vmap_area_lock ->purge_vmap_area_lock ->rlock-AF_NETLINK ->&dir->lock ->&net->ipv6.fib6_walker_lock ->&data->lock ->&base->lock ->quarantine_lock FD: 252 BD: 1 +.+.: sk_lock-AF_ALG ->slock-AF_ALG ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock 
->&obj_hash[i].lock ->&dir->lock ->&ei->socket.wq.wait ->&c->lock ->&n->list_lock ->rcu_node_0 ->remove_cache_srcu ->&____s->seqcount#2 ->&____s->seqcount ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->sk_lock-AF_ALG/1 ->&cfs_rq->removed.lock ->&drbg->drbg_mutex FD: 29 BD: 3 +...: slock-AF_ALG ->&sk->sk_lock.wq FD: 1 BD: 69 ....: (&brmctx->ip4_mc_router_timer) FD: 1 BD: 69 ....: (&brmctx->ip4_other_query.timer) FD: 1 BD: 69 ....: (&brmctx->ip6_mc_router_timer) FD: 1 BD: 69 ....: (&brmctx->ip6_other_query.timer) FD: 28 BD: 77 +.-.: (&tw->tw_timer) ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->stock_lock ->&obj_hash[i].lock ->&dccp_hashinfo.bhash[i].lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 8 BD: 3 +.+.: sk_lock-AF_X25 ->slock-AF_X25 ->wlock-AF_X25 ->&list->lock#30 ->&obj_hash[i].lock ->x25_list_lock ->rlock-AF_X25 FD: 1 BD: 4 +...: slock-AF_X25 FD: 1 BD: 4 ....: wlock-AF_X25 FD: 1 BD: 4 ....: &list->lock#30 FD: 1 BD: 4 ....: rlock-AF_X25 FD: 1 BD: 69 +...: mfc_unres_lock FD: 1 BD: 69 +...: mfc_unres_lock#2 FD: 252 BD: 69 +.+.: sk_lock-AF_CAN ->slock-AF_CAN ->clock-AF_CAN ->&rq->__lock ->j1939_netdev_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&priv->lock ->&priv->j1939_socks_lock ->&jsk->sk_session_queue_lock ->&mm->mmap_lock ->&list->lock#36 ->&obj_hash[i].lock ->&priv->active_session_list_lock ->hrtimer_bases.lock ->&data->lock ->&jsk->waitq ->&____s->seqcount#2 ->&____s->seqcount ->pcpu_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->tk_core.seq.seqcount ->&list->lock#5 ->&ent->pde_unload_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 70 +...: slock-AF_CAN FD: 1 BD: 70 ++..: clock-AF_CAN FD: 1 BD: 71 ..-.: rlock-AF_CAN FD: 1 BD: 3 ..-.: elock-AF_CAN FD: 1 BD: 27 ....: namespace_sem.wait_lock FD: 1 BD: 3 +...: clock-AF_IEEE802154 FD: 1 BD: 3 +...: raw_lock FD: 292 BD: 1 +.+.: bpf_stats_enabled_mutex ->&rq->__lock ->&newf->file_lock ->fs_reclaim ->stock_lock ->pool_lock#2 ->&sb->s_type->i_lock_key#15 ->&c->lock ->cpu_hotplug_lock ->&n->list_lock ->bpf_stats_enabled_mutex.wait_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 4 +...: clock-AF_PPPOX FD: 1 BD: 4 +...: slock-AF_PPPOX FD: 1 BD: 8 +...: &list->lock#31 FD: 1 BD: 3 ....: rlock-AF_IEEE802154 FD: 252 BD: 3 +.+.: sk_lock-AF_PPPOX ->slock-AF_PPPOX ->&pn->hash_lock ->clock-AF_PPPOX ->rlock-AF_PPPOX ->fs_reclaim ->pool_lock#2 ->&ps->sk_lock ->&tunnel->hlist_lock ->&pn->l2tp_session_hlist_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&obj_hash[i].lock ->&list->lock#34 ->chan_lock ->&rnp->exp_wq[3] ->&rq->__lock ->&dir->lock ->&pn->all_channels_lock ->&mm->mmap_lock ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&____s->seqcount#2 ->&____s->seqcount ->&pch->chan_sem ->&pch->upl ->&pf->rwait ->&cfs_rq->removed.lock ->&list->lock#40 ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[2] FD: 1 BD: 4 ..-.: rlock-AF_PPPOX FD: 1 BD: 1 +...: &list->lock#32 FD: 1 BD: 3 +.+.: dev_map_lock FD: 1 BD: 85 +.-.: &r->producer_lock#5 FD: 65 BD: 3 +.+.: sk_lock-AF_CAIF ->slock-AF_CAIF ->&rq->__lock ->&obj_hash[i].lock ->&this->info_list_lock ->(console_sem).lock ->&ei->socket.wq.wait ->clock-AF_CAIF ->elock-AF_CAIF ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->console_owner_lock ->console_owner FD: 44 BD: 83 ....: &new->fa_lock ->&f->f_owner.lock FD: 1 BD: 2 +.+.: misc_mtx.wait_lock FD: 1 BD: 4 +...: slock-AF_CAIF FD: 1 BD: 3 +...: 
rlock-AF_CAIF FD: 1 BD: 4 +...: clock-AF_CAIF FD: 1 BD: 4 ....: elock-AF_CAIF FD: 154 BD: 1 +.+.: sk_lock-AF_RDS ->slock-AF_RDS ->batched_entropy_u16.lock ->once_lock ->pool_lock#2 ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->rds_trans_sem ->(console_sem).lock FD: 1 BD: 2 +...: slock-AF_RDS FD: 1 BD: 9 ....: tracepoint_srcu FD: 1 BD: 4 +...: key#25 FD: 165 BD: 6 +.+.: (work_completion)(&(&local->scan_work)->work) ->&rq->__lock ->&local->mtx FD: 32 BD: 1 ..-.: &(&local->scan_work)->timer FD: 1 BD: 3 ..-.: wlock-AF_PPPOX FD: 32 BD: 1 ..-.: &(&conn->disc_work)->timer FD: 1 BD: 9 +.+.: ebt_mutex.wait_lock FD: 1 BD: 3 +...: clock-AF_NFC FD: 1 BD: 3 ....: rlock-AF_NFC FD: 4 BD: 4 +...: &bat_priv->tp_list_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 ....: (&tp_vars->timer) FD: 1 BD: 1 +...: &tp_vars->unacked_lock FD: 242 BD: 3 +.+.: sk_lock-AF_AX25 ->slock-AF_AX25 ->clock-AF_AX25 ->ax25_list_lock ->&obj_hash[i].lock ->&list->lock#33 ->rlock-AF_AX25 ->&rq->__lock ->wlock-AF_AX25 ->&mm->mmap_lock ->ax25_uid_lock FD: 1 BD: 4 +...: slock-AF_AX25 FD: 1 BD: 4 +...: clock-AF_AX25 FD: 1 BD: 4 +...: ax25_list_lock FD: 1 BD: 4 ....: &list->lock#33 FD: 1 BD: 4 ....: rlock-AF_AX25 FD: 1 BD: 4 ....: wlock-AF_AX25 FD: 1 BD: 5 ++++: ax25_uid_lock FD: 1 BD: 4 +...: rose_node_list_lock FD: 1 BD: 138 ....: key#26 FD: 28 BD: 22 ....: &sk->sk_lock.wq#2 ->&p->pi_lock FD: 1 BD: 82 +.+.: &map->owner.lock FD: 34 BD: 2 +.+.: (work_completion)(&aux->work)#2 ->&aux->poke_mutex ->map_idr_lock ->&obj_hash[i].lock FD: 1 BD: 4 +.+.: &aux->poke_mutex FD: 30 BD: 1 ..-.: net/ipv4/devinet.c:474 FD: 21 BD: 117 +.-.: &dccp_hashinfo.bhash[i].lock ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->clock-AF_INET ->&dccp_hashinfo.bhash2[i].lock ->&obj_hash[i].lock ->clock-AF_INET6 FD: 20 BD: 118 +.-.: &dccp_hashinfo.bhash2[i].lock ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->clock-AF_INET ->&obj_hash[i].lock ->clock-AF_INET6 ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] FD: 1 BD: 83 +.+.: pack_mutex.wait_lock FD: 30 BD: 1 ..-.: &(&hctx->run_work)->timer FD: 1 BD: 1 +.+.: &type->s_umount_key#48 FD: 297 BD: 4 +.+.: net_dm_mutex ->&obj_hash[i].lock ->fs_reclaim ->pool_lock#2 ->&data->lock ->&rq->__lock ->&c->lock ->tracepoints_mutex FD: 142 BD: 3 +.+.: nlk_cb_mutex-NETFILTER ->pool_lock#2 ->fs_reclaim ->&c->lock ->&n->list_lock ->&rq->__lock ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&nf_conntrack_locks[i] ->&____s->seqcount#7 ->&nf_conntrack_locks[i]/1 ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 69 +...: &pmc->lock FD: 1 BD: 4 +.+.: hidp_sk_list.lock FD: 1 BD: 69 ....: (&br->topology_change_timer) FD: 1 BD: 69 ....: (&br->tcn_timer) FD: 1 BD: 69 ....: (&br->hello_timer) FD: 14 BD: 3922 ..-.: &data->lock ->&obj_hash[i].lock ->&base->lock FD: 30 BD: 1 +.-.: (&data->send_timer) FD: 140 BD: 2 +.+.: (work_completion)(&data->dm_alert_work) ->fs_reclaim ->&c->lock ->pool_lock#2 ->&data->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->remove_cache_srcu ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rq->__lock ->quarantine_lock ->&base->lock ->rcu_node_0 FD: 32 BD: 1 +.-.: (&pool->idle_timer) ->&pool->lock/1 ->&pool->lock FD: 21 BD: 1 +.+.: (wq_completion)phy16 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy15 ->(work_completion)(&local->reconfig_filter) FD: 451 BD: 1 +.+.: sock_diag_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 
->&c->lock ->&obj_hash[i].lock ->rlock-AF_NETLINK ->sock_diag_table_mutex ->(kmod_concurrent_max).lock ->&x->wait#17 ->running_helpers_waitq.lock FD: 21 BD: 1 +.+.: (wq_completion)phy17 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 7 +...: &vvs->rx_lock FD: 21 BD: 1 +.+.: (wq_completion)phy18 ->(work_completion)(&local->reconfig_filter) FD: 14 BD: 1 +.-.: (t) ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 12 +...: &pn->l2tp_session_hlist_lock FD: 1 BD: 75 ....: &sw_ctx_rx->wq FD: 30 BD: 1 ..-.: security/integrity/ima/ima_queue_keys.c:35 FD: 29 BD: 81 +.+.: &sta->ampdu_mlme.mtx ->&sta->lock ->&rq->__lock FD: 832 BD: 2 +.+.: (work_completion)(&(&rdev->dfs_update_channels_wk)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4 ....: &list->lock#34 FD: 1 BD: 71 +...: _xmit_TUNNEL#2 FD: 30 BD: 2 +.+.: sk_lock-AF_ALG/1 ->slock-AF_ALG ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 2 ....: (&est->timer) FD: 1 BD: 70 +...: &ipvs->sync_buff_lock FD: 1 BD: 1 +...: &ipvs->sync_lock FD: 5 BD: 2 +.+.: (ima_keys_delayed_work).work ->ima_keys_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 225 +.+.: rcu_state.exp_wake_mutex.wait_lock FD: 1 BD: 81 +.+.: (work_completion)(&sta->ampdu_mlme.work) FD: 31 BD: 80 +.-.: (&ifibss->timer) ->&rdev->wiphy_work_lock FD: 139 BD: 1 +.+.: &xn->hash_lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&est->lock ->(&est->timer) ->&obj_hash[i].lock ->&base->lock ->krc.lock ->&c->lock FD: 14 BD: 2 +...: &est->lock ->&obj_hash[i].lock ->&base->lock FD: 28 BD: 149 ....: &bdi->wb_waitq ->&p->pi_lock FD: 141 BD: 1 ++++: kn->active#59 ->fs_reclaim ->&c->lock ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->(wq_completion)cpuset_migrate_mm ->&wq->mutex FD: 3 BD: 4 +.+.: &ps->sk_lock ->&tunnel->hlist_lock FD: 10 BD: 10 +...: &pn->l2tp_tunnel_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 2 BD: 11 +...: &tunnel->hlist_lock ->&pn->l2tp_session_hlist_lock FD: 13 BD: 6 +.+.: (work_completion)(&tunnel->del_work) ->&tunnel->hlist_lock ->&pn->l2tp_tunnel_idr_lock FD: 1 BD: 1 ....: _rs.lock#5 FD: 196 BD: 69 +.+.: team->team_lock_key#7 ->net_rwsem ->&dev_addr_list_lock_key#2 ->&rq->__lock ->&tn->lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->fs_reclaim ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->netpoll_srcu ->_xmit_IPGRE ->&dir->lock#2 ->input_pool.lock ->&c->lock ->cbs_list_lock ->&____s->seqcount ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&n->list_lock ->(console_sem).lock ->&dev_addr_list_lock_key#2/1 ->lweventlist_lock ->remove_cache_srcu FD: 1 BD: 7 ..-.: &list->lock#35 FD: 40 BD: 1 +.-.: net/netrom/nr_loopback.c:18 ->&list->lock#35 ->nr_list_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->slock-AF_NETROM ->&base->lock ->&data->lock ->&c->lock FD: 1 BD: 69 +.+.: team->team_lock_key#8 FD: 1 BD: 69 +.+.: team->team_lock_key#9 FD: 1 BD: 71 +...: &net->can.rcvlists_lock FD: 1 BD: 71 ++.-: &priv->lock FD: 8 BD: 70 +.-.: &priv->j1939_socks_lock ->pool_lock#2 ->rlock-AF_CAN ->&c->lock FD: 1 BD: 70 +.-.: &jsk->sk_session_queue_lock FD: 1 BD: 70 ..-.: &list->lock#36 FD: 1 BD: 70 +.-.: &priv->active_session_list_lock FD: 1 BD: 70 ....: &jsk->waitq FD: 39 BD: 1 +.-.: (&sk->sk_timer)#2 ->slock-AF_NETROM ->nr_list_lock ->&obj_hash[i].lock ->wlock-AF_NETROM ->&list->lock#22 ->rlock-AF_NETROM ->&base->lock ->&data->lock ->pool_lock#2 ->&____s->seqcount FD: 5 BD: 70 +...: &(to_police(*a)->tcfp_lock) ->tk_core.seq.seqcount FD: 1 BD: 3 +.+.: raw_notifier_lock 
FD: 137 BD: 69 +.+.: &tn->idrinfo->lock ->fs_reclaim ->pool_lock#2 FD: 32 BD: 1 ..-.: &(&bond->arp_work)->timer FD: 31 BD: 1 +.+.: (wq_completion)bond10 ->(work_completion)(&(&bond->arp_work)->work) ->&rq->__lock FD: 12 BD: 69 +...: &p->tcfa_lock ->&(to_police(*a)->tcfp_lock) FD: 31 BD: 1 +.+.: (wq_completion)bond8 ->(work_completion)(&(&bond->arp_work)->work) ->&rq->__lock FD: 31 BD: 1 +.+.: (wq_completion)bond9 ->(work_completion)(&(&bond->arp_work)->work) ->&rq->__lock FD: 31 BD: 1 +.+.: (wq_completion)bond4#2 ->(work_completion)(&(&bond->arp_work)->work) ->&rq->__lock FD: 31 BD: 1 +.+.: (wq_completion)bond11 ->(work_completion)(&(&bond->arp_work)->work) ->&rq->__lock FD: 35 BD: 2 +.+.: (work_completion)(&pool->idle_cull_work) ->wq_pool_attach_mutex ->wq_pool_attach_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 178 BD: 8 +.+.: &sb->s_type->i_mutex_key#8/4 ->&sem->wait_lock ->&rq->__lock ->&ei->i_data_sem ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&ei->i_data_sem/1 ->mmu_notifier_invalidate_range_start ->&journal->j_state_lock ->jbd2_handle ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&c->lock ->&n->list_lock ->&p->pi_lock ->remove_cache_srcu FD: 102 BD: 156 +.+.: &ei->i_data_sem/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&ei->i_es_lock ->&ret->b_state_lock ->&ei->i_raw_lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->&ei->i_prealloc_lock ->&base->lock ->&mapping->private_lock ->&sem->wait_lock ->bit_wait_table + i FD: 1 BD: 13 +...: &list->lock#38 FD: 1 BD: 3 ....: &list->lock#37 FD: 137 BD: 69 +.+.: &tn->idrinfo->lock#2 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&n->list_lock ->&rq->__lock FD: 161 BD: 1 +.+.: (wq_completion)bond2 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 15 +.+.: sco_sk_list.lock FD: 242 BD: 13 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->&conn->lock#2 ->&obj_hash[i].lock ->&base->lock ->&ei->socket.wq.wait ->&rq->__lock ->sco_sk_list.lock ->&mm->mmap_lock FD: 1 BD: 14 +...: slock-AF_BLUETOOTH-BTPROTO_SCO FD: 1 BD: 14 +.+.: &conn->lock#2 FD: 860 BD: 1 .+.+: &rdma_nl_types[idx].sem ->devices_rwsem ->link_ops_rwsem FD: 2 BD: 1 +.-.: (&policy->timer) ->&policy->lock FD: 1 BD: 9 +.+.: tracepoints_mutex.wait_lock FD: 1 BD: 4 +.+.: oom_adj_mutex.wait_lock FD: 1 BD: 3 +...: dgram_lock FD: 1 BD: 9 +.+.: ovs_mutex.wait_lock FD: 139 BD: 1 .+.+: kn->active#60 ->fs_reclaim ->&c->lock ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 1 BD: 10 +.+.: ima_extend_list_mutex.wait_lock FD: 2 BD: 1 +...: &list->lock#39 ->rlock-AF_INET6 FD: 1 BD: 69 +.+.: team->team_lock_key#10 FD: 27 BD: 69 +.+.: team->team_lock_key#11 ->&rq->__lock FD: 1 BD: 4 +.+.: &net->smc.mutex_fback_rsn FD: 238 BD: 3 +.+.: sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->&rq->__lock ->clock-AF_QIPCRTR ->rlock-AF_QIPCRTR ->&mm->mmap_lock FD: 1 BD: 4 +...: slock-AF_QIPCRTR FD: 87 BD: 1 +.-.: (&asoc->timers[i]) ->slock-AF_INET6 FD: 1 BD: 4 +...: clock-AF_QIPCRTR FD: 1 BD: 4 ....: rlock-AF_QIPCRTR FD: 1 BD: 1 ....: _rs.lock#6 FD: 21 BD: 1 +.+.: (wq_completion)phy19 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 141 +.+.: wq_pool_attach_mutex.wait_lock FD: 21 BD: 1 +.+.: (wq_completion)phy20 
->(work_completion)(&local->reconfig_filter) FD: 1 BD: 4 +.+.: chan_lock FD: 21 BD: 1 +.+.: (wq_completion)phy21 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 71 +...: &qdisc_xmit_lock_key#4 FD: 2 BD: 6 +.+.: &match->lock ->ptype_lock FD: 139 BD: 1 .+.+: kn->active#61 ->fs_reclaim ->&c->lock ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 70 +.+.: &block->proto_destroy_lock FD: 141 BD: 69 +.+.: &chain->filter_chain_lock ->&block->lock ->&block->proto_destroy_lock FD: 1 BD: 1 ....: _rs.lock#7 FD: 141 BD: 75 +.+.: tcp_md5sig_mutex ->&rq->__lock ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 192 BD: 77 +.+.: k-sk_lock-AF_INET/1 ->&rq->__lock ->k-slock-AF_INET ->&dir->lock ->fs_reclaim ->&obj_hash[i].lock ->slock-AF_INET ->k-clock-AF_INET ->&hashinfo->ehash_locks[i] ->&c->lock ->pool_lock#2 ->tk_core.seq.seqcount ->&base->lock ->&n->list_lock ->remove_cache_srcu ->&h->lhash2[i].lock ->&tcp_hashinfo.bhash[i].lock ->&queue->rskq_lock FD: 1 BD: 1 +.-.: k-slock-AF_INET/1 FD: 30 BD: 1 ..-.: net/ipv4/tcp_ipv4.c:1064 FD: 290 BD: 2 +.+.: ((tcp_md5_needed).work).work ->cpu_hotplug_lock FD: 14 BD: 1 +.-.: (&tsc_sync_check_timer) ->&obj_hash[i].lock ->&base->lock FD: 73 BD: 2 +.+.: (work_completion)(&work->work)#2 ->&c->lock ->pool_lock#2 ->&n->list_lock ->&dir->lock#2 ->&ul->lock#2 ->&obj_hash[i].lock ->&____s->seqcount#2 ->&____s->seqcount FD: 168 BD: 1 +.+.: (wq_completion)bond8#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->alb_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mii_work)->work) FD: 32 BD: 1 ..-.: &(&bond->alb_work)->timer FD: 48 BD: 69 +...: &dev_addr_list_lock_key/2 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key FD: 3 BD: 69 +.+.: &bond->stats_lock/2 FD: 53 BD: 1 +.+.: (wq_completion)bond9#2 ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->mii_work)->work) ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#8 FD: 1 BD: 2 .+.+: sb_writers#15 FD: 1 BD: 3 +.+.: bcm_notifier_lock FD: 168 BD: 1 +.+.: (wq_completion)bond5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->mii_work)->work) ->&rq->__lock FD: 168 BD: 1 +.+.: (wq_completion)bond6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->mii_work)->work) ->&rq->__lock FD: 1 BD: 6 ++..: ip_set_ref_lock FD: 1 BD: 72 +.+.: (work_completion)(&(&priv->gc_work)->work) FD: 292 BD: 1 .+.+: kn->active#62 ->fs_reclaim ->&c->lock ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock ->&rq->__lock FD: 143 BD: 1 ++++: kn->active#63 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&cgrp->pidlist_mutex ->&c->lock ->&n->list_lock ->&rq->__lock FD: 30 BD: 1 ..-.: &(&l->destroy_dwork)->timer FD: 141 BD: 4 +.+.: (work_completion)(&(&l->destroy_dwork)->work) ->&cgrp->pidlist_mutex ->&obj_hash[i].lock FD: 191 BD: 77 +.+.: (work_completion)(&link->csa_finalize_work) ->&wdev->mtx FD: 146 BD: 4 +.+.: &drbg->drbg_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 ->(kmod_concurrent_max).lock ->&rq->__lock ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->&x->wait#17 ->running_helpers_waitq.lock ->remove_cache_srcu ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->(&timer.timer) ->&____s->seqcount ->crngs.lock ->&rng->jent_lock ->&cfs_rq->removed.lock FD: 4 BD: 7 +.+.: &rng->jent_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 32 BD: 1 +.-.: 
(&peer->timer_zero_key_material) FD: 140 BD: 9 +.+.: (work_completion)(&peer->clear_peer_work) ->&handshake->lock ->&peer->keypairs.keypair_update_lock ->&rq->__lock FD: 1 BD: 155 ..-.: &pool->lock#3 FD: 236 BD: 1 +.+.: &tfile->napi_mutex ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&n->list_lock ->&rq->__lock ->&mm->mmap_lock FD: 28 BD: 5 ....: &pgdat->reclaim_wait[i] ->&p->pi_lock FD: 1 BD: 1 ..-.: &pool->wait FD: 161 BD: 1 +.+.: (wq_completion)bond12 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond13 ->(work_completion)(&(&slave->notify_work)->work) FD: 49 BD: 98 +...: reuseport_lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->reuseport_ida.xa_lock ->clock-AF_INET6 ->&c->lock FD: 1 BD: 99 ..-.: reuseport_ida.xa_lock FD: 1 BD: 2 +.+.: loop_validate_mutex.wait_lock FD: 832 BD: 1 +.+.: ppp_mutex ->&rq->__lock ->&mm->mmap_lock ->fs_reclaim ->stock_lock ->pool_lock#2 ->stack_depot_init_mutex ->&c->lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&n->list_lock FD: 1 BD: 4 +.+.: bnep_sk_list.lock FD: 447 BD: 5 +.+.: nlk_cb_mutex-SOCK_DIAG ->fs_reclaim ->pool_lock#2 ->&c->lock ->inet_diag_table_mutex ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&net->packet.sklist_lock ->&n->list_lock FD: 21 BD: 1 +.+.: (wq_completion)phy22 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 75 ....: key#27 FD: 1 BD: 75 +.+.: acaddr_hash_lock FD: 1 BD: 3 +...: data_sockets.lock FD: 30 BD: 3 +.+.: sk_lock-AF_ISDN ->slock-AF_ISDN ->clock-AF_ISDN ->rlock-AF_ISDN ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_ISDN FD: 1 BD: 4 ....: rlock-AF_ISDN FD: 161 BD: 1 +.+.: (wq_completion)bond14 ->(work_completion)(&(&slave->notify_work)->work) FD: 34 BD: 1 +.-.: (&q->timer) ->&obj_hash[i].lock ->&data->lock ->pool_lock#2 FD: 161 BD: 1 +.+.: (wq_completion)bond15 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 72 +.+.: (work_completion)(&(&priv->gc_work)->work)#2 FD: 409 BD: 2 +.+.: (work_completion)(&nbd->remove_work) ->&disk->open_mutex ->&bdev->bd_size_lock ->&q->mq_freeze_lock ->set->srcu ->&q->mq_freeze_wq ->&bdev->bd_holder_lock ->&root->kernfs_rwsem ->(&bdi->laptop_mode_wb_timer) ->&obj_hash[i].lock ->&base->lock ->bdi_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&wb->work_lock ->cgwb_lock ->&pool->lock/1 ->&rq->__lock ->&(&wb->dwork)->timer ->(work_completion)(&(&wb->dwork)->work) ->&(&wb->bw_dwork)->timer ->(work_completion)(&(&wb->bw_dwork)->work) ->&bdi->cgwb_release_mutex ->pin_fs_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key#3 ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->pool_lock#2 ->mount_lock ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->&k->list_lock ->sysfs_symlink_target_lock ->subsys mutex#38 ->&x->wait#9 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->uevent_sock_mutex ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->gdp_mutex ->&q->sysfs_lock ->&q->sysfs_dir_lock ->&q->debugfs_mutex ->dev_hotplug_mutex ->req_lock ->&x->wait#11 ->subsys mutex#37 ->&c->lock ->percpu_ref_switch_lock ->&q->queue_lock ->(&q->timeout) ->(work_completion)(&q->timeout_work) ->(wq_completion)kintegrityd ->&wq->mutex ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->&ACCESS_PRIVATE(sdp, lock) ->&x->wait#3 ->&q->rq_qos_mutex ->&tags->lock ->cpu_hotplug_lock ->&xa->xa_lock#8 ->&q->unused_hctx_lock ->&set->tag_list_lock ->(&sq->pending_timer) ->(work_completion)(&td->dispatch_work) ->&q->blkcg_mutex 
->pcpu_lock ->bio_slab_lock ->&xa->xa_lock#9 ->&sb->s_type->i_lock_key#3 ->&zone->lock ->&(&ssp->srcu_sup->work)->timer ->(work_completion)(&(&ssp->srcu_sup->work)->work) ->(&sdp->delay_work) ->(work_completion)(&sdp->work) ->nbd_index_mutex ->wq_mayday_lock ->&x->wait ->wq_pool_mutex ->rcu_node_0 ->&x->wait#10 FD: 1 BD: 3 ....: (&bdi->laptop_mode_wb_timer) FD: 410 BD: 1 +.+.: (wq_completion)nbd-del ->(work_completion)(&nbd->remove_work) FD: 2 BD: 3 +.+.: &bdi->cgwb_release_mutex ->cgwb_lock FD: 1 BD: 3 +.+.: (wq_completion)kintegrityd FD: 142 BD: 11 +.+.: &eq->sysfs_lock ->&q->debugfs_mutex ->&dd->lock ->&obj_hash[i].lock ->pool_lock#2 ->pcpu_lock FD: 1 BD: 1 ....: _rs.lock#9 FD: 32 BD: 70 +.+.: &tp->lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 2 BD: 71 +.+.: &xa->xa_lock#16 ->pool_lock#2 FD: 1 BD: 69 +.+.: &head->masks_lock FD: 146 BD: 1 +.+.: (wq_completion)tc_filter_workqueue ->(work_completion)(&(rwork)->work) FD: 145 BD: 2 +.+.: (work_completion)(&(rwork)->work) ->&obj_hash[i].lock ->(work_completion)(&ht->run_work) ->&ht->mutex ->&xa->xa_lock#16 ->pool_lock#2 ->krc.lock ->&dir->lock FD: 1 BD: 73 ....: (&local->dynamic_ps_timer) FD: 27 BD: 73 +.+.: (work_completion)(&local->dynamic_ps_enable_work) ->&rq->__lock FD: 1 BD: 73 +.+.: (work_completion)(&sdata->recalc_smps) FD: 1 BD: 73 +.+.: (work_completion)(&link->color_change_finalize_work) FD: 27 BD: 73 +.+.: (work_completion)(&(&link->dfs_cac_timer_work)->work) ->&rq->__lock FD: 1 BD: 4 +...: &pernet->lock FD: 1 BD: 8 +.+.: shares_mutex FD: 140 BD: 1 .+.+: kn->active#64 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->shares_mutex ->&c->lock ->&n->list_lock ->&rq->__lock FD: 2 BD: 4 +.+.: &pch->chan_sem ->&pch->downl FD: 1 BD: 4 +...: &pn->all_channels_lock FD: 1 BD: 5 +...: &pch->downl FD: 1 BD: 4 +...: &pch->upl FD: 1 BD: 70 ....: &pf->rwait FD: 1 BD: 4 ....: &list->lock#40 FD: 161 BD: 1 +.+.: (wq_completion)bond23 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond24 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 72 ....: umem_ida.xa_lock FD: 37 BD: 2 +.+.: (work_completion)(&umem->work) ->umem_ida.xa_lock ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->&rq->__lock ->&lruvec->lru_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 231 BD: 1 +.+.: (wq_completion)bond25 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 32 BD: 1 ..-.: &(&bond->mcast_work)->timer FD: 231 BD: 1 +.+.: (wq_completion)bond26 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond19 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond20 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond21 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond22 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond23#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond24#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond25#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond26#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond27 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond28 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond27#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond29 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond28#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond30 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond29#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond30#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond31 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond32 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond46 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond47 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond48 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond49 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond50 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond51 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond52 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond53 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond54 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond48#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond55 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond49#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond56 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond50#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond57 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond51#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond52#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond53#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond54#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: 
(wq_completion)bond56#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond55#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 27 BD: 3 +.+.: &knet->mutex ->&rq->__lock FD: 1 BD: 75 +...: &mux->lock FD: 2 BD: 130 +...: &mux->rx_lock ->rlock-AF_KCM FD: 161 BD: 1 +.+.: (wq_completion)bond57#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 29 BD: 3 +.+.: sk_lock-AF_KCM ->slock-AF_KCM ->clock-AF_KCM ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_KCM FD: 1 BD: 4 +...: clock-AF_KCM FD: 27 BD: 3 +.+.: (work_completion)(&kcm->tx_work) ->&rq->__lock FD: 1 BD: 131 ....: rlock-AF_KCM FD: 161 BD: 1 +.+.: (wq_completion)bond58 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond59 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond40 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond41 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond14#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond42 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond15#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond16 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond17 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond18 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond19#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond20#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond87 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond88 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond89 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 7 +...: &list->lock#41 FD: 252 BD: 1 +.+.: (wq_completion)vsock-loopback ->(work_completion)(&vsock->pkt_work) FD: 251 BD: 2 +.+.: (work_completion)(&vsock->pkt_work) ->&list->lock#41 ->vsock_table_lock ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->&data->lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->&rq->__lock FD: 146 BD: 6 +.+.: sk_lock-AF_VSOCK/1 ->slock-AF_VSOCK ->fs_reclaim ->pool_lock#2 ->&vvs->tx_lock ->vsock_table_lock ->&vvs->rx_lock ->&list->lock#41 ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->clock-AF_VSOCK ->rlock-AF_VSOCK FD: 1 BD: 7 +...: &vvs->tx_lock FD: 231 BD: 1 +.+.: (wq_completion)bond90 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond91 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond51#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond92 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
231 BD: 1 +.+.: (wq_completion)bond94 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond93 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond95 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond96 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond55#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond97 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond56#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond98 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond84 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond57#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond99 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond85 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond58#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond59#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond101 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond60 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 8 +...: xfrm_state_gc_lock FD: 231 BD: 1 +.+.: (wq_completion)bond102 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond61 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond103 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond62 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond86 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond104 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond63 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond105 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond64 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond106 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond65 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond107 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond66 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond108 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond67 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond109 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond68 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond88#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond110 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond69 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond111 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond70 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond112 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond71 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond113 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond72 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond114 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond73 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond90#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond115 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond74 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond116 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond91#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond75 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond92#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond117 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond76 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond118 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond77 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond119 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond78 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond120 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond121 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond122 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond79 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond80 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond123 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond81 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond124 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond82 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond83 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond126 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond84#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond127 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond85#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond128 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond93#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond86#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: _rs.lock#10 FD: 231 BD: 1 +.+.: (wq_completion)bond129 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond87#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond130 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond88#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond131 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond89#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond132 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond90#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond133 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond91#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond134 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond92#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond135 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond93#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 70 +.+.: __ip_vs_mutex.wait_lock FD: 1 BD: 1 ....: &wq#4 FD: 1 BD: 1 +.+.: &s->lock FD: 231 BD: 1 +.+.: (wq_completion)bond136 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond94#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond137 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond95#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond138 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond96#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond139 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond97#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond140 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond98#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond141 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 231 BD: 1 +.+.: (wq_completion)bond99#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond36 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond142 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond100 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond37 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond143 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond101#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond38 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond144 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond102#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond39 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond103#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond146 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond40#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond94#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond104#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond147 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond41#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond95#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond105#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond148 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond42#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond106#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 191 BD: 74 +.+.: (work_completion)(&ifmgd->monitor_work) ->&wdev->mtx FD: 27 BD: 73 +.+.: (work_completion)(&(&ifmgd->tdls_peer_del_work)->work) ->&rq->__lock FD: 1 BD: 80 +...: &ifmgd->teardown_lock FD: 1 BD: 80 ....: (&ifmgd->timer) FD: 334 BD: 2 +.+.: (work_completion)(&rdev->conn_work) ->&rdev->wiphy.mtx FD: 231 BD: 1 +.+.: (wq_completion)bond149 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond43 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond107#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond150 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond44 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond108#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond45 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond151 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond109#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond96#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond46#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond152 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond110#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 161 BD: 1 +.+.: (wq_completion)bond47#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond153 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond111#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond113#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond112#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond154 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond155 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond114#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond156 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond97#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond115#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond157 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond116#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond158 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond98#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond117#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond159 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond99#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond118#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond160 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond100#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond119#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond161 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond101#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond162 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond121#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond102#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond163 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond122#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond103#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond164 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond123#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond165 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond124#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond166 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond125 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond167 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond104#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond126#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond168 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond127#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond105#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond169 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond128#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond170 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond129#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond171 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond130#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond172 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond131#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond132#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond173 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond106#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond133#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond174 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond134#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond175 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond107#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond135#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond176 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond108#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond136#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond177 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond109#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond137#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond178 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
231 BD: 1 +.+.: (wq_completion)bond110#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond138#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond179 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond111#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond139#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond180 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond140#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond181 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond141#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond182 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond142#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond183 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond113#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond143#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond184 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond114#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond144#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond185 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond145 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond186 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond146#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond115#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond187 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond147#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond188 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond148#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond116#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond189 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond149#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond190 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond150#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond191 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond151#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond192 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond152#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond193 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond153#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond194 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond154#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond195 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond155#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond117#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond196 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond156#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond118#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond197 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond157#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond119#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond198 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond158#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond199 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond200 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond160#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond73#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond161#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond201 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond162#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond202 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond163#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond203 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond164#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond204 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond165#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond74#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond205 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond166#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond206 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond126#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond75#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond167#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond207 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond127#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond168#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond208 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond128#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond76#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond169#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond209 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond129#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond170#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond210 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond130#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond171#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond211 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond131#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond172#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond212 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond132#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond77#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond173#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond213 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond133#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond174#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond214 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond134#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond175#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond215 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond135#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond176#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond216 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond136#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond78#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond177#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond217 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond137#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond79#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond178#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond218 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond138#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond179#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond219 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond139#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond180#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond220 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond140#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond181#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond80#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond221 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond141#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond182#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond222 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond81#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond142#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond223 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond183#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond143#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond82#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond224 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond184#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond144#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond83#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond225 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond185#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond145#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond226 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond186#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond146#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond227 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond187#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond147#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond228 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond84#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond188#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond148#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond229 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond85#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond189#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond149#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond230 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond86#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond190#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond150#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond231 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond87#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond191#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond151#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond232 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond192#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond152#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond233 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond193#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond153#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond234 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond194#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond154#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond235 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond88#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond195#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond155#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond236 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond196#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond89#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond156#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond237 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond197#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond90#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond157#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond238 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond198#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond91#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond158#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond239 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond199#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond159#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond240 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond200#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond92#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond160#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond241 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond201#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond93#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond161#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond242 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond202#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond162#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 
+.+.: (wq_completion)bond94#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond243 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond203#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond163#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond244 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond204#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond164#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond205#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond165#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond246 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond206#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond166#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond247 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond207#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond167#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond248 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond208#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond168#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond249 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond209#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond169#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond250 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond170#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond251 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 155 ....: key#28 FD: 231 BD: 1 +.+.: 
(wq_completion)bond211#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond171#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond95#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond252 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond212#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond172#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond253 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond96#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond213#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond173#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond254 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond214#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond174#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond255 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond215#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond175#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond256 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond216#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond176#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond257 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond217#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond177#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond258 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond218#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond178#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond259 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond219#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond97#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond179#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond220#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond98#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond180#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond260 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond221#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond99#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond181#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond261 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond222#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond182#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond100#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond262 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond223#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond183#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond263 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond101#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond224#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond184#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond264 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond102#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond225#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond185#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond265 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond226#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond186#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond266 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond227#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond187#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond267 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond228#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond188#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond268 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond229#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond189#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond269 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond230#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond190#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond270 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond231#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond191#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond103#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond271 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond232#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond192#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond104#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond272 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond233#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond193#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond273 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond234#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond194#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond274 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond235#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond195#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond275 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond236#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond196#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond276 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond237#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond197#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond277 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond238#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond198#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond278 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond239#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond105#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond199#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond279 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
231 BD: 1 +.+.: (wq_completion)bond240#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond200#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond280 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond241#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond201#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond242#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond281 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond202#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond243#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond203#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond244#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond283 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond204#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond245 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond106#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond284 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond205#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond246#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond107#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond285 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond206#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond247#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond108#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond207#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond248#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond109#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond286 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond208#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond249#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond110#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond287 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond209#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond250#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond111#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond288 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond210#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond251#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond112#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond289 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond211#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond252#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond113#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond290 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond212#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond253#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond114#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond291 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond213#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond254#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond292 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond214#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond115#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond255#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond293 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond215#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond256#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond294 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond216#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond257#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond295 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond217#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond258#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond296 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond218#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond259#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond297 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 39 BD: 3590 +...: &bridge_netdev_addr_lock_key/2 ->pool_lock#2 ->(console_sem).lock ->&c->lock FD: 231 BD: 1 +.+.: (wq_completion)bond219#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond260#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond298 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond220#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond221#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond299 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond261#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond300 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond262#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond223#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond301 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond263#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond224#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond302 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond264#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond116#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond225#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond303 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond265#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond226#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond304 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond227#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond266#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond117#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond305 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond228#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond267#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond306 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond118#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond229#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond268#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond307 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond230#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond119#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond269#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond308 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond231#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond270#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond309 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond232#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond271#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond310 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond233#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond120#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond272#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond311 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond234#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond273#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond312 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond235#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond313 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond236#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond275#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond314 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond237#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: 
(wq_completion)bond276#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond121#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond315 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond238#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond277#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond122#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond316 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond239#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond123#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond278#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond317 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond124#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond279#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond318 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond240#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond280#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond319 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond241#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond281#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond320 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond242#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond125#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond282 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond321 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond243#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond283#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond322 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond244#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond284#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond323 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond245#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond126#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond285#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond324 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond246#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond286#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond247#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond325 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond287#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond326 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond248#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond288#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond249#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond289#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond328 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond250#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond290#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond329 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond251#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond291#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond330 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond252#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond127#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond292#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond331 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond253#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond293#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond332 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond254#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond294#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond128#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond333 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond255#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond295#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond129#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond334 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond256#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond296#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond130#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond335 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond257#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond297#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond131#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond336 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond258#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond298#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond337 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond259#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond132#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond299#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond338 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond260#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond133#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond300#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond339 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond261#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond134#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond301#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond340 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond262#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond135#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond302#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond341 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond263#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond303#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond136#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond342 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond264#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond304#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond265#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond305#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond137#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond343 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond266#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond306#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond138#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond344 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond267#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond307#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond139#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond345 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond268#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond308#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond140#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond346 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond269#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond309#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond141#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond347 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond270#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond310#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond142#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond348 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond271#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond311#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond349 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond143#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond272#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond312#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond350 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond273#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond144#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond313#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond351 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond145#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond314#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond315#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond353 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond274#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond316#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond354 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond275#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond317#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 161 BD: 1 +.+.: (wq_completion)bond146#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond355 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond318#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond356 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond276#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond319#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond357 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond277#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond320#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond358 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond278#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond359 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond321#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond279#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond360 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond147#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond322#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond280#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond361 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond323#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond148#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond281#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond362 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond324#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond282#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond363 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond325#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond283#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond364 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond326#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond284#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond365 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond327 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond285#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond328#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond329#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond286#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond366 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond149#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond330#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond287#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond367 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond331#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond288#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond150#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond368 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond332#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond289#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond369 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond333#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond290#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond370 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond334#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond291#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond371 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond335#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond292#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond372 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond336#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond293#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond373 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond294#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond374 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond337#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond295#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond375 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond338#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond296#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond376 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond339#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond297#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond377 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond151#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond340#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond298#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond378 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond341#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond299#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond379 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond342#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond300#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond380 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond301#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond381 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond344#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond302#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond382 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: _rs.lock#11 FD: 231 BD: 1 +.+.: (wq_completion)bond345#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond303#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond383 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond346#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond304#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond152#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond384 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond347#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond305#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond385 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond348#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond306#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond386 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond349#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond307#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond387 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond350#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond308#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond388 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond351#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond309#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond389 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond352 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond153#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 87 BD: 1 +.-.: &dreq->dreq_lock ->pool_lock#2 ->&dir->lock ->slock-AF_INET6 FD: 1 BD: 75 ....: wlock-AF_INET6 FD: 231 BD: 1 +.+.: (wq_completion)bond310#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond390 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond353#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond311#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond391 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond354#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond312#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond392 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 95 BD: 1 +.-.: (&msk->sk.icsk_retransmit_timer) ->slock-AF_INET FD: 231 BD: 1 +.+.: (wq_completion)bond355#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond313#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond393 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 87 BD: 1 +.-.: 
(&hc->tx_rtotimer) ->slock-AF_INET6 ->&obj_hash[i].lock ->pool_lock#2 ->&dir->lock ->stock_lock FD: 231 BD: 1 +.+.: (wq_completion)bond356#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond154#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond314#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond394 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond357#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond315#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond395 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond358#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond316#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond396 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond359#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond317#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond397 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond360#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond318#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond398 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond361#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond319#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond399 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond362#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond155#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond320#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond400 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond363#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond156#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond321#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond401 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond364#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond157#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond322#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond402 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond365#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond323#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond403 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond366#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond158#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond324#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond404 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond367#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond159#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond325#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond405 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond368#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond326#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond406 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond369#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond160#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond327#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond407 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond370#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond328#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond408 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond161#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond371#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond329#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond409 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond372#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond330#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond162#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond410 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond373#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond331#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond163#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond411 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond374#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond332#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond412 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond375#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond333#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond164#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond413 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond376#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: _rs.lock#12 FD: 17 BD: 18 +.+.: &pdata->netdev_lock ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 3 ....: ndev_hash_lock FD: 1 BD: 11 +.+.: devices.xa_lock FD: 833 BD: 17 +.+.: &rxe->usdev_lock ->&pdata->netdev_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond334#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond414 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond377#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond335#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 139 BD: 3649 +.+.: &table->lock#4 ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&table->rwlock ->&device->event_handler_rwsem ->&rq->__lock FD: 1 BD: 3650 ....: &table->rwlock FD: 1 BD: 3652 ++++: &device->event_handler_rwsem FD: 231 BD: 1 +.+.: (wq_completion)bond415 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 5 ....: &device->cache_lock FD: 1 BD: 3 +.+.: rdmacg_mutex FD: 29 BD: 13 +.+.: subsys mutex#84 ->&k->k_lock ->&rq->__lock FD: 836 BD: 1 +.+.: (wq_completion)infiniband ->(work_completion)(&work->work)#3 FD: 835 BD: 2 +.+.: (work_completion)(&work->work)#3 ->fs_reclaim ->pool_lock#2 ->&rxe->usdev_lock ->&device->cache_lock ->&obj_hash[i].lock ->&device->event_handler_rwsem FD: 211 BD: 12 ++++: &device->client_data_rwsem ->&xa->xa_lock#17 ->fs_reclaim ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#18 ->&rq->__lock ->&xa->xa_lock#19 ->crngs.lock ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->&x->wait#28 ->(console_sem).lock ->&x->wait#29 ->krc.lock ->lock ->&root->kernfs_rwsem ->ib_mad_port_list_lock ->kernfs_idr_lock ->lock#7 ->&n->list_lock ->umad_ida.xa_lock ->&x->wait#9 ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->subsys mutex#85 ->pcpu_alloc_mutex ->uverbs_ida.xa_lock ->subsys mutex#86 ->subsys mutex#87 ->rds_ib_devices_lock ->ib_nodev_conns_lock ->smc_ib_devices.mutex ->&device->event_handler_rwsem ->&pnettable->lock FD: 8 BD: 15 +.+.: &xa->xa_lock#17 ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 10 BD: 13 +.+.: &xa->xa_lock#18 ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount FD: 2 BD: 76 +.+.: &xa->xa_lock#19 ->pool_lock#2 FD: 1 BD: 13 ....: &x->wait#28 FD: 1 BD: 1 ....: _rs.lock#13 FD: 1 BD: 13 ....: &x->wait#29 FD: 1 BD: 13 ....: ib_mad_port_list_lock FD: 1 BD: 76 +.+.: &id_priv->qp_mutex FD: 2 BD: 76 +.+.: &xa->xa_lock#20 ->pool_lock#2 FD: 2 BD: 76 ....: &cm_id_priv->lock ->&cm.lock FD: 1 BD: 77 ....: &cm.lock FD: 1 BD: 13 ....: umad_ida.xa_lock FD: 3 BD: 13 +.+.: subsys mutex#85 ->&k->k_lock FD: 1 BD: 13 ....: uverbs_ida.xa_lock FD: 3 BD: 13 +.+.: subsys mutex#86 ->&k->k_lock FD: 29 BD: 13 +.+.: subsys mutex#87 ->&rq->__lock ->&k->k_lock FD: 27 BD: 13 +.+.: rds_ib_devices_lock ->&rq->__lock FD: 1 BD: 13 +.+.: ib_nodev_conns_lock FD: 1 BD: 1 ....: _rs.lock#14 FD: 1 BD: 1 ....: _rs.lock#15 FD: 834 BD: 2 +.+.: 
(work_completion)(&smcibdev->port_event_work) ->&rxe->usdev_lock ->&table->rwlock FD: 836 BD: 12 +.+.: &device->compat_devs_mutex ->fs_reclaim ->&xa->xa_lock#17 ->&c->lock ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&rq->__lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&____s->seqcount#2 ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#84 ->&rxe->usdev_lock ->&zone->lock ->rcu_node_0 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&sem->wait_lock ->&p->pi_lock ->&lock->wait_lock ->&rcu_state.expedited_wq ->uevent_sock_mutex.wait_lock ->&pgdat->kswapd_wait ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 1 ....: _rs.lock#16 FD: 231 BD: 1 +.+.: (wq_completion)bond378#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond416 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond336#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 11 BD: 91 +.--: &____s->seqcount#14 ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 12 BD: 90 +.-.: &(&bp->lock)->lock ->&____s->seqcount#14 FD: 231 BD: 1 +.+.: (wq_completion)bond379#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond417 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond337#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond418 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond380#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond338#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond381#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond419 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond339#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond420 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond382#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond340#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond421 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond341#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond422 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond383#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond165#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond342#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond423 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond384#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond166#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond343#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond424 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond385#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond167#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond344#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond425 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond386#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond345#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond426 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond387#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond346#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond427 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond388#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond347#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond428 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond389#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond348#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: 
(wq_completion)bond168#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond429 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond390#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond349#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond430 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond391#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond350#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond392#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond431 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond351#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond169#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond393#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond432 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond352#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond394#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond433 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond353#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond395#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond434 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond170#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond354#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond396#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond435 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond355#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond397#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond436 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond356#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond398#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond171#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond437 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond399#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond438 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond358#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond400#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond439 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond359#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond401#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond440 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond360#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond402#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond172#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond441 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond361#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond403#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond173#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond442 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond362#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond404#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond174#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond405#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond443 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond363#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond175#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond406#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond444 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond407#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond445 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond408#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond176#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond366#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond446 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond367#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond177#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond447 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond409#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond368#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond178#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond448 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond369#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond410#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond449 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond370#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond411#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond450 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond371#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond179#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond412#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond451 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond372#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond180#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond413#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond452 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond373#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond181#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond414#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond374#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond182#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond454 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond415#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond375#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond455 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond416#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond376#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond456 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond417#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond377#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond183#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond457 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond378#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond184#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond458 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond379#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond459 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond418#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond380#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond185#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond460 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond419#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond461 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond420#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond381#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond462 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond421#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond382#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond463 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond422#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond383#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 
BD: 1 +.+.: (wq_completion)bond464 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond423#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond384#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond465 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond424#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond385#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond466 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond425#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond386#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond467 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond186#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond426#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond468 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond187#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond427#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond387#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond469 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond188#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond428#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond388#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond470 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond189#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond429#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 
BD: 1 +.+.: (wq_completion)bond389#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond471 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond190#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond390#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond472 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond430#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond391#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond473 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond431#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond392#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond474 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond191#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond432#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond475 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond394#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond433#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond192#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond476 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond395#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond434#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond477 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond396#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond435#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond478 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond397#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond193#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond436#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond398#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond479 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond437#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond194#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond399#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond480 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond438#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond195#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond400#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond481 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond196#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond401#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond482 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond439#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond197#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond402#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond483 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond440#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond198#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond403#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond484 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond441#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond404#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond485 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond442#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond405#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond199#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond486 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond443#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond406#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 45 BD: 3588 +...: &dev_addr_list_lock_key#3/2 ->&macvlan_netdev_addr_lock_key/1 ->&obj_hash[i].lock ->krc.lock FD: 161 BD: 1 +.+.: (wq_completion)bond200#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 71 +...: &qdisc_xmit_lock_key#5 FD: 1 BD: 69 +.+.: (work_completion)(&port->wq) FD: 231 BD: 1 +.+.: (wq_completion)bond487 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond444#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond407#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond445#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 161 BD: 1 +.+.: (wq_completion)bond201#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond488 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond408#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond446#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond489 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond409#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond202#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond447#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond490 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond410#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond448#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond491 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond411#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond449#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond492 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond412#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond450#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond493 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond413#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond451#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond494 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond452#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond495 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond415#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond453 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond496 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond416#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond454#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond497 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond417#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond455#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond498 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond418#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond456#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond499 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond419#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond457#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond203#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond500 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond420#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond501 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond458#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond502 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond421#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond459#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond503 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond422#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond460#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond504 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond423#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond461#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond505 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond424#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond462#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond506 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond425#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond463#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond507 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond426#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond464#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond508 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond427#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond465#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond509 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond428#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond510 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond429#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond466#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond511 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond430#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond467#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond512 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond431#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond468#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond513 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond432#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond469#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond514 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond433#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond470#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond515 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond434#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond471#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond516 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond435#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond472#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond517 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond436#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond473#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond518 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond437#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond474#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond519 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond438#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond475#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond520 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond439#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond476#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond521 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond440#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond477#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond522 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond441#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond478#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond523 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond442#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond479#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond524 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond443#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond480#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond525 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond444#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond481#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 1 ....: &head->lock FD: 231 BD: 1 +.+.: (wq_completion)bond526 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond445#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond482#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond527 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond446#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond483#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond528 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond447#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond484#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond529 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond448#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond485#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond530 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond449#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond486#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond531 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond450#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond487#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond532 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond451#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond488#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond533 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond452#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond489#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond534 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond453#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond490#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond535 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond454#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond491#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond536 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond455#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond492#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond537 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond456#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond493#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond538 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond457#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond494#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond539 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond458#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond495#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond540 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond459#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond496#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond541 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond460#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond497#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond542 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond461#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond498#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond543 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond462#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond499#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond544 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond463#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond500#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond545 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond464#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond501#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond546 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond465#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond502#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond547 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond466#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond503#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond548 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond467#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond504#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond549 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond468#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond505#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond550 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond469#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond506#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 137 BD: 69 +.+.: &tn->idrinfo->lock#3 ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond551 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond470#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond507#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond552 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond471#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond508#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond472#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond509#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond554 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond473#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond510#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond555 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond474#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond511#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond556 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond475#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond512#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond557 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond476#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond513#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond558 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond477#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond478#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond514#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 30 BD: 1 ..-.: &(&hinfo->gc_work)->timer FD: 231 BD: 1 +.+.: (wq_completion)bond479#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond515#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: 
(wq_completion)bond480#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond516#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond481#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond517#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond482#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond518#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond483#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond519#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond484#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond520#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond485#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond486#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond487#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond488#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond489#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond490#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond491#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond492#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond493#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond494#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond495#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond496#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond497#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond498#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond499#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond500#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond501#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond502#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond503#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond504#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond505#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond506#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond507#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond508#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond509#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond510#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond511#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond512#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond513#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond514#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond515#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond516#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond600 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond518#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond603 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond604 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond605 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond606 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond607 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond608 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond609 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond566 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond610 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond567 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond611 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond568 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond612 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond569 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond613 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond570 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond614 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond205#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond571 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond572 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond615 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond207#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond573 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond616 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond617 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond574 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond618 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond575 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond619 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond576 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond209#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond620 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond577 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond210#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond578 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond211#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond621 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond579 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond622 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond580 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond623 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond581 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond624 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond582 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond625 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond212#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond583 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond626 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond213#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond584 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond585 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond627 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond586 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond215#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond587 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond628 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond588 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond629 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond589 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond630 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond631 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond591 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 2 ..-.: key#29 FD: 231 BD: 1 +.+.: (wq_completion)bond632 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond592 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond633 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond593 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond634 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond594 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond635 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond595 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond548#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond636 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond596 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond637 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond638 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond597 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond550#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond639 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond598 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond551#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 27 BD: 5 +.+.: &device->unregistration_lock ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond599 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond640 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond554#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond641 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond600#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond555#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond642 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond601 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond556#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond643 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond602 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond557#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond644 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond603#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond558#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond645 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond604#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond559 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond646 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond605#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond560 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond647 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond606#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond648 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond561 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond607#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond562 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond649 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond608#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond220#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond650 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond609#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond221#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond563 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond651 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond610#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond564 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond652 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond611#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond565 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond653 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond612#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond566#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond654 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 
231 BD: 1 +.+.: (wq_completion)bond613#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond567#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond655 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond614#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond568#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond656 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond615#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond569#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond657 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond570#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond616#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond658 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond571#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond617#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond659 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond572#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond618#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond660 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond573#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond619#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond661 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond574#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond620#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond662 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond575#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond621#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond663 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond576#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond622#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond664 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond577#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond623#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond665 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond578#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond624#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond666 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond579#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond625#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond667 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond580#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond626#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond668 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond581#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond627#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond669 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond582#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond628#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond583#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond629#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond671 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond584#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond630#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond672 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond631#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond673 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond586#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond632#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond674 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond587#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond633#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond675 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond588#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond634#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond676 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond589#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond635#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond677 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond590 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond636#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond678 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: 
(wq_completion)bond591#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond637#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond679 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond592#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond638#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond680 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond593#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond639#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond681 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond594#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond640#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond682 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond595#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond641#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond683 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond684 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond643#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond596#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond685 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond644#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond597#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond686 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond645#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond598#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond687 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond646#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond599#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond688 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond647#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond600#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond689 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond648#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond601#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond690 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond649#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond602#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond691 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond603#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond692 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond651#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond604#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond693 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond652#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond605#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond694 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond653#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond606#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 231 BD: 1 +.+.: (wq_completion)bond695 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond654#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond607#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond696 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond655#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond608#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond697 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond656#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond609#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond698 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond657#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond610#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond699 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond658#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond611#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond700 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond659#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond612#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond701 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond660#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond613#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond702 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond661#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond614#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond703 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond615#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond663#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond704 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond616#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond664#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond705 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond617#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond665#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond706 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond618#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond666#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond707 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond619#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond667#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond708 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond620#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond668#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond709 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond621#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond669#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond710 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond622#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond670 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond711 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond623#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond671#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond712 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond624#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond672#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond713 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond625#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond673#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond714 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond626#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond674#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond715 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond627#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond675#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond716 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond628#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond676#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond717 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond629#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond677#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond718 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond630#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond678#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond719 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond631#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond679#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond720 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond680#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond721 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond632#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond681#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond722 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond633#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond682#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond723 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond634#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond683#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond724 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond635#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond684#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond725 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond636#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond685#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond726 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond637#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond686#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond727 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond638#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond687#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond728 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond639#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond688#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond729 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond640#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond689#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond730 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond641#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond690#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond731 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond642#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond691#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond732 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond643#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond692#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond644#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond693#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond734 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond645#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond694#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond735 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond646#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond9#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond695#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond736 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond647#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond696#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond737 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond648#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond697#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond738 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond649#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond698#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond739 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond650#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond10#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond699#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond740 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond700#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond651#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond701#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: 
(wq_completion)bond11#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond741 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond652#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond702#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond12#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond742 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond653#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond703#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond13#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond14#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond743 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond654#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond704#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond15#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond744 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond655#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond705#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond16#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond745 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond656#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond706#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond746 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond657#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond747 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond658#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond707#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond748 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond659#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond708#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond660#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond709#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond661#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond710#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond662#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond17#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond711#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond663#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond18#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond749 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond712#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond750 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond713#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond664#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond19#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond751 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond714#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond665#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond752 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond20#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond715#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond666#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond21#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond753 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond667#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond22#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond754 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond716#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond668#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond23#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond755 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond717#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond669#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond24#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond756 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond670#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond757 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond718#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond671#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond25#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond719#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond26#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond720#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond672#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond758 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond721#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond673#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond759 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond27#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond722#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond674#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond28#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond675#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond29#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond723#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond676#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond760 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond724#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond677#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond761 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond30#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond725#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond762 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond31#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond726#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond678#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond763 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond32#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond764 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond679#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond727#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond765 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond680#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond728#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond766 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond729#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond33 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond767 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond681#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond768 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond682#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond34 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond769 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond683#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond35 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond730#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond770 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond684#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond731#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond771 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond685#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond732#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond772 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond686#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond687#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond36#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond688#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond37#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond733 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond773 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond38#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond734#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond774 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond775 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond689#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond690#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond735#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond776 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond691#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond736#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond777 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond692#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond737#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond778 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond693#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond39#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond738#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond694#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond40#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond739#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond779 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond695#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond41#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond780 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond696#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond42#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond740#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond781 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond697#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond43#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond782 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond741#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond783 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond698#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond742#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond784 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond743#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond785 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond44#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond699#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond744#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond700#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond45#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond745#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond786 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond701#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond787 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond702#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond46#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond746#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond788 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond747#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond748#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond703#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond749#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond789 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond704#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond750#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond751#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond790 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond705#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond752#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond791 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond753#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond792 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond706#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond754#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond793 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond707#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond755#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond47#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond794 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond708#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond795 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond709#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond756#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond796 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond710#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond797 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond711#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond757#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond798 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond712#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond758#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond799 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond713#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond800 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond714#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond801 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond715#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond759#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond802 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond760#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond716#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond761#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond803 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond717#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond762#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond804 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond718#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond763#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond719#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
FD: 231 BD: 1 +.+.: (wq_completion)bond764#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond805 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond765#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond806 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond720#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 505 BD: 1 +.+.: (wq_completion)kstrp ->&rq->__lock ->(work_completion)(&strp->work)#2 ->(work_completion)(&barr->work) FD: 27 BD: 1 +.+.: (work_completion)(&(&strp->msg_timer_work)->work) ->&rq->__lock FD: 504 BD: 2 +.+.: (work_completion)(&strp->work)#2 ->&rq->__lock ->sk_lock-AF_INET6 ->slock-AF_INET6 FD: 231 BD: 1 +.+.: (wq_completion)bond807 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond721#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond766#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond808 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond722#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond767#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond809 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond723#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond724#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond768#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond810 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond725#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond769#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond811 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond726#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond770#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond812 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond727#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond771#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond728#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond772#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond813 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond729#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond773#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond814 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond730#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond774#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond815 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond816 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond731#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond775#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond817 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond732#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond776#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond818 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond733#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond777#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond819 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond734#3 ->(work_completion)(&(&slave->notify_work)->work) 
->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond778#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond820 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond735#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond779#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond821 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond736#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond780#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond822 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond737#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond781#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond738#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond782#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond824 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond739#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond783#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond825 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond740#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond784#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond826 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond741#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond785#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond827 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond742#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond786#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond828 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond743#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond787#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond829 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond744#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond830 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond745#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond788#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond831 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond746#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond789#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond832 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond790#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond833 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 231 BD: 1 +.+.: (wq_completion)bond747#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond791#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond834 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond792#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond835 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond748#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond793#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond836 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond749#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond794#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond48#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond837 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond795#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond49#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond838 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond796#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond750#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond751#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond797#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond839 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond50#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond840 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond51#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond752#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond798#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond841 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond842 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond753#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond843 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond754#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond799#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond844 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond755#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond845 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond800#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond801#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond846 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond756#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond802#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond847 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond757#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond803#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond848 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond758#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond804#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond849 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond759#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond850 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond760#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond805#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond851 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond806#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond761#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond807#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond808#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond763#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond852 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond809#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond764#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond853 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond765#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond810#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond854 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond811#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond766#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond855 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond812#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond767#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond856 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond813#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond768#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond857 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond815#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond769#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond858 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond770#3 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond859 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond816#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond771#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond860 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond817#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond772#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond861 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond818#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond862 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond819#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond773#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond863 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond820#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond774#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond864 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond821#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond775#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond822#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond776#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond865 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond823 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond777#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: 
(wq_completion)bond866 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond824#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond867 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond825#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond778#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond868 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond826#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond869 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond779#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond827#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond870 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond780#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond828#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond781#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond829#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond830#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond782#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond871 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond872 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond783#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond831#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond832#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond873 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond784#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond833#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond785#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond875 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond834#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond786#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond876 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond835#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond836#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond787#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond877 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond837#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond788#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond878 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond838#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond879 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond839#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond789#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond880 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond840#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond790#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond881 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond841#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond791#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond882 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond842#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond792#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond883 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond843#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond793#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond884 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond794#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond885 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond844#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond795#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond886 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond796#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond887 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond845#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond797#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond888 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond889 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond846#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond798#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond847#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond890 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond799#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond848#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond891 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond800#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond849#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond892 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond801#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond893 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond802#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond850#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond894 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond803#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond851#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock FD: 231 BD: 1 +.+.: (wq_completion)bond804#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond852#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond895 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond805#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond853#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond896 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond806#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond897 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond854#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond898 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond855#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond807#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond899 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond856#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond808#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond900 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond857#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond858#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond901 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond809#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond902 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond859#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond810#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond903 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond860#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond811#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond861#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond904 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond812#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond905 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond862#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond813#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond814#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond906 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond863#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond907 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond864#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond908 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond815#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond909 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond816#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond910 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond817#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond911 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond818#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond912 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond865#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond819#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond913 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond866#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond820#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond914 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond868#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond915 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond821#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond869#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond870#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond916 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond822#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond871#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond823#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond872#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#17 FD: 231 BD: 1 +.+.: (wq_completion)bond873#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond917 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond824#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond825#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond874 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond875#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond918 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond826#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond919 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond827#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond920 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond828#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond876#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond921 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond829#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 
+.+.: (wq_completion)bond877#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond830#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond878#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond922 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond831#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond879#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond923 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond924 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond832#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond880#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond925 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond881#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond926 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond833#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond927 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond834#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond882#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond835#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond836#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond928 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond884#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond929 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond885#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
FD: 231 BD: 1 +.+.: (wq_completion)bond930 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond886#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond837#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond931 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond887#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond932 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond933 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond888#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond838#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond934 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond889#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond839#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond935 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond890#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond840#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond841#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond936 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond891#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond937 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond892#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond842#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond938 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond893#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
231 BD: 1 +.+.: (wq_completion)bond843#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond939 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond894#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4289 ....: key#30 FD: 29 BD: 4248 +.+.: &cache->alloc_lock ->swap_avail_lock ->&p->lock#2 ->&rq->__lock FD: 49 BD: 4248 +.+.: shmem_swaplist_mutex ->&rq->__lock ->&xa->xa_lock#21 ->&info->lock ->&p->lock#2 ->&xa->xa_lock#7 ->rcu_node_0 FD: 1 BD: 4248 +.+.: &tree->lock FD: 10 BD: 4249 ....: &xa->xa_lock#21 ->pool_lock#2 ->key#30 ->&ctrl->lock ->&c->lock FD: 1 BD: 4250 ....: &ctrl->lock FD: 231 BD: 1 +.+.: (wq_completion)bond895#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4248 +.+.: &vmpr->sr_lock FD: 231 BD: 1 +.+.: (wq_completion)bond896#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond940 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond941 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 75 BD: 1 .+.+: &type->s_umount_key#49 ->&rq->__lock ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#24 ->rcu_node_0 ->&rcu_state.expedited_wq ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->&wb->list_lock ->kernfs_idr_lock ->&cfs_rq->removed.lock FD: 1 BD: 4251 +.+.: f2fs_list_lock FD: 231 BD: 1 +.+.: (wq_completion)bond942 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond844#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond943 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond845#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond944 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond846#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond945 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond847#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 30 BD: 1 ..-.: &(&krcp->page_cache_work)->timer FD: 231 BD: 1 +.+.: (wq_completion)bond946 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond848#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 
+.+.: (wq_completion)bond947 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond897#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond849#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond948 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond898#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond850#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond949 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond899#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond851#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond950 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond900#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond901#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond902#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond852#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond951 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond903#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond952 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond853#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond904#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond854#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond953 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond855#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond905#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 231 BD: 1 +.+.: (wq_completion)bond954 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond856#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond955 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond857#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond906#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond858#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond956 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond859#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond907#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond957 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond908#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond860#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond861#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond909#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond958 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond910#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond959 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond862#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond863#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond960 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond911#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond961 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond864#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond912#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond962 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond865#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond913#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond914#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond915#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 71 BD: 1 .+.+: &type->s_umount_key#50 ->&rq->__lock ->&lru->node[i].lock ->&dentry->d_lock ->&obj_hash[i].lock ->&cfs_rq->removed.lock ->pool_lock#2 ->&sb->s_type->i_lock_key#30 ->&wb->list_lock ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->inode_hash_lock ->&fsnotify_mark_srcu FD: 231 BD: 1 +.+.: (wq_completion)bond963 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond964 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond916#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond965 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond866#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond867#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond917#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond967 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond918#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond968 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond919#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond969 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond920#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond868#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond921#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond971 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond972 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond870#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond973 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond974 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond975 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond872#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond976 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond873#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond977 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond874#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond922#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond978 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond923#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond979 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond875#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond924#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond925#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond980 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond876#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond981 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond877#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond926#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond982 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond878#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond927#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond983 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond928#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond984 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond929#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond985 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond930#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond986 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond879#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond931#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond987 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond880#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond932#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond988 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond881#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond933#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond989 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond882#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond934#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond883#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond935#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond990 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond884#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond991 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond885#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond936#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond992 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond887#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond937#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond888#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond938#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond994 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond889#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond939#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond890#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond940#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond995 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond891#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond941#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond996 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond892#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond942#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond943#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 142 BD: 1 +.+.: recent_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->proc_subdir_lock 
->proc_inum_ida.xa_lock ->recent_lock ->&ent->pde_unload_lock ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->&n->list_lock ->recent_mutex.wait_lock FD: 231 BD: 1 +.+.: (wq_completion)bond944#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond893#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond945#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 2 +.+.: recent_mutex.wait_lock FD: 231 BD: 1 +.+.: (wq_completion)bond998 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond946#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond999 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond894#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond947#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1000 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond895#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1001 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond896#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond948#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond897#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond949#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond898#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond950#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond899#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1002 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond900#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond951#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1003 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond952#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 47 BD: 3 +.+.: &ep->mtx/1 ->&rq->__lock ->&f->f_lock ->&ep->lock FD: 1 BD: 3 ....: &ep->poll_wait FD: 231 BD: 1 +.+.: (wq_completion)bond901#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1004 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond902#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond903#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond953#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond904#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond954#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond905#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond955#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1005 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond906#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1006 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 28 BD: 1 +.+.: sk_lock-AF_IEEE802154 ->&rq->__lock ->slock-AF_IEEE802154 FD: 1 BD: 2 +...: slock-AF_IEEE802154 FD: 231 BD: 1 +.+.: (wq_completion)bond1007 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1008 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond907#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond956#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1009 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond908#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1010 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond909#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 
+.+.: (wq_completion)bond957#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1011 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond910#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond958#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1012 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond959#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1013 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond911#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond960#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#18 FD: 231 BD: 1 +.+.: (wq_completion)bond1014 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond912#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1015 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond961#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1016 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1017 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond913#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond962#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond963#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1018 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 3 BD: 4267 ..-.: lock#10 ->&lruvec->lru_lock FD: 231 BD: 1 +.+.: (wq_completion)bond914#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond915#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond916#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond964#2 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1019 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond917#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond965#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1020 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond966 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1021 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond918#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond967#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond919#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond968#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1022 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond969#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond920#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond970 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1023 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond921#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond971#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1024 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond922#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond972#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1025 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1026 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 
+.+.: (wq_completion)bond923#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1027 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond924#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1028 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond925#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond973#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1029 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond926#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond974#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1030 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond975#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1031 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1032 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond927#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond976#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1033 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond928#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1034 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond929#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1035 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond930#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond977#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1036 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1037 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond53#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond978#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond931#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond932#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond54#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1038 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond933#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond979#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1039 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond934#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond980#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond981#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond222#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond935#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond223#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond982#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1040 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond936#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond937#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond983#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond984#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond938#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond939#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond985#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond55#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond940#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond986#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1041 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond941#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1042 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond56#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond7 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond57#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1043 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond987#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond58#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1044 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond59#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond8#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond224#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond9#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1045 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond10#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1046 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond942#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond988#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1047 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond989#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond943#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond990#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond991#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1048 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond992#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1049 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1050 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond993 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond994#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond944#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 137 BD: 3 +.+.: callchain_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&n->list_lock ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1051 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond995#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond945#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond996#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1052 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond997 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond946#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1053 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond998#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond947#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1054 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond999#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond948#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1055 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1000#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond949#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1056 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 40 BD: 3589 +...: &macvlan_netdev_addr_lock_key/3 ->&bridge_netdev_addr_lock_key/2 ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 46 BD: 71 +...: &dev_addr_list_lock_key#2/4 ->&macvlan_netdev_addr_lock_key/3 FD: 231 BD: 1 +.+.: (wq_completion)bond950#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1057 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1058 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1001#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1059 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 137 BD: 69 +.+.: &tn->idrinfo->lock#4 ->fs_reclaim ->&c->lock ->pool_lock#2 FD: 231 BD: 1 +.+.: (wq_completion)bond951#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1002#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond952#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1003#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1060 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 140 BD: 1 +.+.: &audit_cmd_mutex.lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&list->lock ->kauditd_wait.lock ->rlock-AF_NETLINK ->&n->list_lock FD: 231 BD: 1 +.+.: (wq_completion)bond953#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1004#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond1005#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1061 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond954#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1006#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1062 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 2 +.+.: bpf_stats_enabled_mutex.wait_lock FD: 231 BD: 1 +.+.: (wq_completion)bond1007#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1063 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1008#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1064 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1009#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1065 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond955#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1066 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond956#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1010#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond957#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1011#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1067 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1012#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1013#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1068 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1014#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1069 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond958#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1015#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond959#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1016#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond960#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1070 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1017#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond961#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1018#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1071 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1019#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1072 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond962#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 17 BD: 3649 -.-.: &q->current_entry_lock ->hrtimer_bases.lock FD: 231 BD: 1 +.+.: (wq_completion)bond963#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond964#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1073 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1020#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond965#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1074 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 69 +.+.: (work_completion)(&q->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1021#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond966#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1075 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1022#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond967#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond1076 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1023#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond968#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1077 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond969#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1024#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1025#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1078 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond970#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1026#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 452 BD: 1 +.+.: (wq_completion)sock_diag_events ->(work_completion)(&bsk->work) FD: 451 BD: 2 +.+.: (work_completion)(&bsk->work) ->fs_reclaim ->pool_lock#2 ->sock_diag_table_mutex ->nl_table_lock ->&obj_hash[i].lock ->&rq->__lock ->nl_table_wait.lock ->&dir->lock ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock FD: 231 BD: 1 +.+.: (wq_completion)bond971#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1079 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1027#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1080 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond972#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond226#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 3 +.+.: isotp_notifier_lock FD: 231 BD: 1 +.+.: (wq_completion)bond1028#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1081 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond973#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1082 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1029#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond974#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1083 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1030#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond975#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1084 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1031#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1085 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond976#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1032#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1086 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1087 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1033#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1034#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond977#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1088 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1035#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond978#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1089 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1036#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1037#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond979#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1090 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond980#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1038#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1091 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond981#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 
BD: 1 +.+.: (wq_completion)bond1039#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1092 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond982#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 2 +.+.: br_ioctl_mutex.wait_lock FD: 231 BD: 1 +.+.: (wq_completion)bond1040#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1093 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond983#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1041#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1042#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1094 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond984#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1095 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1043#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond985#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1096 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1044#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1097 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond61#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1045#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1098 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond986#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1046#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond987#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1099 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1047#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond988#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1048#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond989#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1049#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1100 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1050#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1101 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1051#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond990#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1102 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1052#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond991#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1103 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1053#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1104 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond992#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1054#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1055#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond227#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1105 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond993#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1106 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1056#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1107 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1057#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond62#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond994#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 445 BD: 1 +.+.: sk_lock-AF_RXRPC ->&rq->__lock ->slock-AF_RXRPC ->&rxnet->local_mutex FD: 1 BD: 2 +...: slock-AF_RXRPC FD: 231 BD: 1 +.+.: (wq_completion)bond1058#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond995#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond996#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1108 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1109 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1059#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1060#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1110 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1061#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond997#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 137 BD: 69 +.+.: &tn->idrinfo->lock#5 ->fs_reclaim ->pool_lock#2 FD: 231 BD: 1 +.+.: (wq_completion)bond1111 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1062#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond998#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1112 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1063#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond999#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1113 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1114 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1064#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1000#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1115 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1065#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1066#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1116 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1067#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1001#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1117 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1118 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1068#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1119 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1120 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1069#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1002#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1003#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1121 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1122 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1070#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1005#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1123 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1071#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1006#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1072#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1007#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1124 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1073#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 2 +.+.: &pipe->mutex#2/2 FD: 231 BD: 1 +.+.: (wq_completion)bond1008#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1074#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1125 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1009#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1126 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1075#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1076#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1077#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1078#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1010#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 504 BD: 1 +.+.: &ctx->tx_lock ->sk_lock-AF_INET6 ->slock-AF_INET6 FD: 504 BD: 4 +.+.: (work_completion)(&(&sw_ctx_tx->tx_work.work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1079#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 75 +...: &sw_ctx_tx->encrypt_compl_lock FD: 231 BD: 1 +.+.: (wq_completion)bond1127 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1080#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1012#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1128 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1129 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1081#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1130 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1082#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1083#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1084#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1013#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1085#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1014#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1131 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1086#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1015#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond63#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1087#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1088#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1132 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1016#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1089#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1133 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1017#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 137 BD: 69 +.+.: &pn->all_ppp_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 70 +...: &ppp->rlock FD: 2 BD: 69 +...: &ppp->wlock ->&ppp->rlock FD: 231 BD: 1 +.+.: (wq_completion)bond1134 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 69 +...: &dev_addr_list_lock_key#4 FD: 231 BD: 1 +.+.: (wq_completion)bond1135 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1018#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1136 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1020#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1137 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1090#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1091#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1021#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1138 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1092#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1022#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1139 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1023#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1140 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1093#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1094#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1024#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1141 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond228#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1095#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1142 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1025#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1026#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1027#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1143 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1096#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1144 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1028#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1097#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1145 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1029#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1098#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1146 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1099#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1030#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1100#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1031#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1147 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1101#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1102#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1032#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1148 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1103#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1033#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1149 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1104#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1150 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1034#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1106#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1151 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1035#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1107#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1036#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1108#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1152 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: 
(wq_completion)bond64#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1037#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond231#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1153 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1038#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond232#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1109#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1154 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1039#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond233#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1110#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1111#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 148 BD: 2 +.+.: crypto_default_rng_lock ->crypto_alg_sem ->fs_reclaim ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->pool_lock#2 ->&drbg->drbg_mutex FD: 231 BD: 1 +.+.: (wq_completion)bond1040#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1041#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1112#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1155 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1042#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1113#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1156 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1114#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 165 BD: 2 +.+.: (work_completion)(&(&local->roc_work)->work) ->&rq->__lock ->&local->mtx FD: 32 BD: 1 ..-.: &(&local->roc_work)->timer FD: 231 BD: 1 +.+.: (wq_completion)bond1157 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1043#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1115#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1158 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1044#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1116#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1159 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1045#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 139 BD: 3 +.+.: &sb->s_type->i_mutex_key#9/1 ->&rq->__lock ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&dentry->d_lock ->proc_subdir_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 231 BD: 1 +.+.: (wq_completion)bond1160 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1046#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1117#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1118#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1161 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1162 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1047#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1119#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1163 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1120#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1164 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1121#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1049#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1050#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1165 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1122#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1123#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1051#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1166 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1124#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1052#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1167 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1125#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1053#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1126#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1168 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1127#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1054#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1169 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1128#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1170 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1129#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1171 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1130#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1055#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1172 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1131#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1056#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: nopage_rs.lock FD: 3 BD: 6 ....: kernfs_pr_cont_lock ->kernfs_rename_lock ->(console_sem).lock FD: 231 BD: 1 +.+.: (wq_completion)bond1173 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1132#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1133#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
231 BD: 1 +.+.: (wq_completion)bond1057#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1174 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1134#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1058#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1175 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1135#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1059#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1176 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1060#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1177 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1136#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1061#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1178 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1137#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1062#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1179 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1180 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1063#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1181 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1138#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1064#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1182 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1139#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1065#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1140#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1066#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1183 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1141#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1067#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1068#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1184 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1142#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1185 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1143#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1186 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1144#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1187 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1069#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1145#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1146#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1070#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1188 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond234#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1147#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1071#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1189 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond235#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1148#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1072#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1073#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1190 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1149#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1074#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1191 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1150#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1075#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond65#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1192 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1151#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1076#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1193 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1152#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1077#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1194 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1153#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1078#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1195 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1154#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1079#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1155#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1080#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1196 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1156#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1081#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1197 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1157#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1082#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1198 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1158#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1083#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1159#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1084#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1160#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1085#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1199 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1161#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1086#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1200 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1162#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1087#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1201 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1088#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1163#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1202 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond1089#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1164#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1090#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1165#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond236#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1203 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1166#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1091#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1167#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1204 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: _rs.lock#19 FD: 231 BD: 1 +.+.: (wq_completion)bond1092#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1168#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1205 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1169#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1206 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: _rs.lock#20 FD: 231 BD: 1 +.+.: (wq_completion)bond1207 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1170#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 231 BD: 1 +.+.: (wq_completion)bond1208 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1209 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1093#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1210 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1094#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1211 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1212 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1095#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1171#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: 
(wq_completion)bond1096#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1172#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 32 BD: 1 ..-.: &(&bond->slave_arr_work)->timer FD: 231 BD: 1 +.+.: (wq_completion)bond1213 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1097#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1173#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1098#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1174#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1099#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1214 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1175#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1100#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1176#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1215 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1101#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond67#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1102#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1177#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1216 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1178#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1103#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1217 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1179#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1104#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1218 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1105#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1219 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1180#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1220 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1181#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1106#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1182#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1107#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1221 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1108#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1183#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1222 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1109#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1184#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1223 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1185#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1224 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1110#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1186#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1225 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1187#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1226 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1188#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1227 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1189#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 
+.+.: (wq_completion)bond1111#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1228 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1229 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1112#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1190#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1230 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1113#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1191#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1192#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1231 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1114#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1232 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1115#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1193#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1194#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1116#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1233 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1117#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1195#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1234 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1118#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1196#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1235 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1119#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1197#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1236 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1120#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1198#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1237 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1121#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1199#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1238 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1122#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1200#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1123#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1201#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1124#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1202#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1240 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1241 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1242 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1126#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1204#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1243 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1127#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1244 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1128#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1129#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1205#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1245 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1130#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1206#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1246 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1131#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond11#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1132#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1247 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1248 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1249 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1133#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1207#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1250 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1208#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1252 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1251 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1134#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1209#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1253 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1135#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1210#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1136#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1211#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1254 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1137#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1212#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1255 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1138#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1213#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1256 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1139#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1214#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1257 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1140#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1215#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1258 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1141#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond12#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1259 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond13#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1260 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1142#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond14#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1143#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1216#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1144#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1217#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1261 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
231 BD: 1 +.+.: (wq_completion)bond1145#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1218#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1262 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1146#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1219#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1263 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1147#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1264 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1148#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1220#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1265 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1149#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1221#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1266 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1150#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1222#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1151#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1267 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1268 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1152#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1223#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1269 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1153#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1224#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1270 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1154#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1225#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1271 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1155#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1226#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1272 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 231 BD: 1 +.+.: (wq_completion)bond1156#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1227#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1273 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1157#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1228#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1274 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1158#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1229#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1275 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1159#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 231 BD: 1 +.+.: (wq_completion)bond1230#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work)
all lock chains:
irq_context: 0 &obj_hash[i].lock irq_context: 0 &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex irq_context: 0 (console_sem).lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 console_mutex irq_context: 0 console_mutex syslog_lock irq_context: 0 console_mutex (console_sem).lock irq_context: 0 console_mutex console_lock console_srcu console_owner_lock irq_context: 0 console_mutex console_lock console_srcu console_owner irq_context: 0 console_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_lock console_srcu console_owner_lock irq_context: 0 console_lock console_srcu console_owner irq_context: 0 console_lock console_srcu console_owner console_owner_lock irq_context: 0 input_pool.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 clocksource_mutex irq_context: 0 clocksource_mutex watchdog_lock
irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 resource_lock irq_context: 0 cache_disable_lock irq_context: 0 pgd_lock irq_context: 0 init_mm.page_table_lock irq_context: 0 init_mm.page_table_lock pgd_lock irq_context: 0 early_pfn_lock irq_context: 0 acpi_ioapic_lock irq_context: 0 acpi_ioapic_lock ioapic_lock irq_context: 0 acpi_ioapic_lock (console_sem).lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 syscore_ops_lock irq_context: 0 map_entries_lock irq_context: 0 devtree_lock irq_context: 0 pcpu_lock irq_context: 0 param_lock irq_context: 0 base_crng.lock irq_context: 0 crng_init_wait.lock irq_context: 0 zonelist_update_seq irq_context: 0 zonelist_update_seq zonelist_update_seq.seqcount irq_context: 0 dmar_global_lock irq_context: 0 &zone->lock irq_context: 0 &zone->lock &____s->seqcount irq_context: 0 &pcp->lock &zone->lock irq_context: 0 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &____s->seqcount irq_context: 0 pool_lock#2 irq_context: 0 pcpu_alloc_mutex irq_context: 0 pcpu_alloc_mutex pcpu_lock irq_context: 0 &n->list_lock irq_context: 0 &c->lock irq_context: 0 slab_mutex irq_context: 0 slab_mutex pool_lock#2 irq_context: 0 slab_mutex &c->lock irq_context: 0 slab_mutex &n->list_lock irq_context: 0 slab_mutex pcpu_alloc_mutex irq_context: 0 slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 batched_entropy_u64.lock irq_context: 0 batched_entropy_u64.lock crngs.lock irq_context: 0 batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 espfix_init_mutex irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock irq_context: 0 espfix_init_mutex &zone->lock irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 espfix_init_mutex &____s->seqcount irq_context: 0 espfix_init_mutex pool_lock#2 irq_context: 0 percpu_counters_lock irq_context: 0 &mm->page_table_lock irq_context: 0 ptlock_ptr(page) irq_context: 0 ptlock_ptr(page)#2 irq_context: 0 trace_types_lock irq_context: 0 panic_notifier_list.lock irq_context: 0 die_chain.lock irq_context: 0 trace_event_sem irq_context: 0 batched_entropy_u32.lock irq_context: 0 batched_entropy_u32.lock crngs.lock irq_context: 0 &rq->__lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 init_task.pi_lock irq_context: 0 init_task.pi_lock &rq->__lock irq_context: 0 init_task.vtime_seqcount irq_context: 0 slab_mutex &pcp->lock &zone->lock irq_context: 0 slab_mutex &zone->lock irq_context: 0 slab_mutex &____s->seqcount irq_context: 0 wq_pool_mutex irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 wq_pool_mutex &zone->lock irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 wq_pool_mutex &____s->seqcount irq_context: 0 wq_pool_mutex pool_lock#2 irq_context: 0 wq_pool_mutex &c->lock irq_context: 0 &wq->mutex irq_context: 0 &wq->mutex &pool->lock irq_context: 0 wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex 
&____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 shrinker_rwsem irq_context: 0 rcu_node_0 irq_context: 0 rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_lock rcu_node_0 irq_context: 0 &rnp->exp_poll_lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock pool_lock#2 irq_context: 0 trace_event_sem trace_event_ida.xa_lock &c->lock irq_context: 0 trigger_cmd_mutex irq_context: 0 i8259A_lock irq_context: 0 rcu_read_lock pool_lock#2 irq_context: 0 irq_domain_mutex irq_context: 0 free_vmap_area_lock irq_context: 0 vmap_area_lock irq_context: 0 &irq_desc_lock_class irq_context: 0 vmap_purge_lock irq_context: 0 vmap_purge_lock purge_vmap_area_lock irq_context: 0 cpa_lock irq_context: 0 cpa_lock pgd_lock irq_context: 0 timekeeper_lock irq_context: 0 timekeeper_lock tk_core.seq.seqcount irq_context: 0 timekeeper_lock tk_core.seq.seqcount &obj_hash[i].lock irq_context: 0 tk_core.seq.seqcount irq_context: 0 &base->lock irq_context: 0 &base->lock &obj_hash[i].lock irq_context: 0 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock &pool->lock/1 irq_context: 0 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock irq_context: 0 pmus_lock pcpu_alloc_mutex irq_context: 0 pmus_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 pmus_lock pool_lock#2 irq_context: 0 pmus_lock &obj_hash[i].lock irq_context: 0 &swhash->hlist_mutex irq_context: 0 pmus_lock &cpuctx_mutex irq_context: 0 tty_ldiscs_lock irq_context: 0 console_lock irq_context: 0 console_lock resource_lock irq_context: 0 console_lock pool_lock#2 irq_context: 0 console_lock &obj_hash[i].lock irq_context: 0 console_lock &pcp->lock &zone->lock irq_context: 0 console_lock &zone->lock irq_context: 0 console_lock &____s->seqcount irq_context: 0 console_lock &c->lock irq_context: 0 console_lock kbd_event_lock irq_context: 0 console_lock kbd_event_lock led_lock irq_context: 0 console_lock vga_lock irq_context: 0 console_lock (console_sem).lock irq_context: 0 console_lock console_owner_lock irq_context: 0 console_mutex &port_lock_key irq_context: 0 console_mutex console_lock irq_context: 0 console_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_mutex console_srcu_srcu_usage.lock irq_context: 0 console_mutex console_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 console_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 console_mutex console_srcu irq_context: 0 console_lock console_srcu console_owner &port_lock_key irq_context: 0 init_task.alloc_lock irq_context: 0 acpi_ioremap_lock irq_context: 0 acpi_ioremap_lock pool_lock#2 irq_context: 0 acpi_ioremap_lock resource_lock irq_context: 0 acpi_ioremap_lock memtype_lock irq_context: 0 acpi_ioremap_lock free_vmap_area_lock irq_context: 0 acpi_ioremap_lock vmap_area_lock irq_context: 0 semaphore->lock irq_context: 0 
*(&acpi_gbl_reference_count_lock) irq_context: 0 clockevents_lock irq_context: 0 clockevents_lock tk_core.seq.seqcount irq_context: 0 clockevents_lock tick_broadcast_lock irq_context: 0 clockevents_lock i8253_lock irq_context: 0 &desc->request_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class irq_context: 0 &desc->request_mutex &irq_desc_lock_class i8259A_lock irq_context: 0 ioapic_lock irq_context: 0 ioapic_mutex irq_context: 0 ioapic_mutex &domain->mutex irq_context: 0 ioapic_mutex &domain->mutex pool_lock#2 irq_context: 0 ioapic_mutex &domain->mutex vector_lock irq_context: 0 ioapic_mutex &domain->mutex &irq_desc_lock_class irq_context: 0 ioapic_mutex &domain->mutex i8259A_lock irq_context: 0 ioapic_mutex &domain->mutex &c->lock irq_context: 0 ioapic_mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &____s->seqcount irq_context: 0 vector_lock irq_context: 0 &pool->lock#2 irq_context: hardirq jiffies_lock irq_context: hardirq jiffies_lock jiffies_seq.seqcount irq_context: hardirq hrtimer_bases.lock irq_context: hardirq hrtimer_bases.lock tk_core.seq.seqcount irq_context: hardirq log_wait.lock irq_context: softirq drivers/char/random.c:1010 irq_context: softirq drivers/char/random.c:1010 input_pool.lock irq_context: 0 spec_ctrl_mutex irq_context: 0 spec_ctrl_mutex cpu_hotplug_lock irq_context: 0 slab_mutex rcu_read_lock pool_lock#2 irq_context: 0 slab_mutex &obj_hash[i].lock irq_context: 0 sysctl_lock irq_context: 0 tomoyo_policy_lock irq_context: 0 tomoyo_policy_lock pool_lock#2 irq_context: 0 aa_secids.xa_lock irq_context: 0 aa_secids.xa_lock pool_lock#2 irq_context: 0 aa_buffers_lock irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex irq_context: 0 rtnl_mutex &c->lock irq_context: 0 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &zone->lock irq_context: 0 rtnl_mutex &____s->seqcount irq_context: 0 rtnl_mutex pool_lock#2 irq_context: 0 lock irq_context: 0 lock kernfs_idr_lock irq_context: 0 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem irq_context: 0 file_systems_lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_lock irq_context: 0 &type->s_umount_key/1 irq_context: 0 &type->s_umount_key/1 pool_lock#2 irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key/1 shrinker_rwsem irq_context: 0 &type->s_umount_key/1 shrinker_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key/1 list_lrus_mutex irq_context: 0 &type->s_umount_key/1 sb_lock irq_context: 0 &type->s_umount_key/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 percpu_counters_lock irq_context: 0 &type->s_umount_key/1 crngs.lock irq_context: 0 &type->s_umount_key/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key/1 &zone->lock irq_context: 0 &type->s_umount_key/1 &____s->seqcount irq_context: 0 &type->s_umount_key/1 &c->lock irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key irq_context: 0 &type->s_umount_key/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key/1 tk_core.seq.seqcount irq_context: 0 
&type->s_umount_key/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &type->s_umount_key/1 &dentry->d_lock irq_context: 0 mnt_id_ida.xa_lock irq_context: 0 &dentry->d_lock irq_context: 0 mount_lock irq_context: 0 mount_lock mount_lock.seqcount irq_context: 0 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 irq_context: 0 &type->s_umount_key#2/1 pool_lock#2 irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#2/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#2/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#2/1 sb_lock irq_context: 0 &type->s_umount_key#2/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#2/1 &zone->lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &c->lock irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 irq_context: 0 &type->s_umount_key#2/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#2/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 &dentry->d_lock irq_context: 0 ucounts_lock irq_context: 0 proc_inum_ida.xa_lock irq_context: 0 init_fs.lock irq_context: 0 init_fs.lock init_fs.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 irq_context: 0 &type->s_umount_key#3/1 pool_lock#2 irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#3/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#3/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#3/1 sb_lock irq_context: 0 &type->s_umount_key#3/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#3/1 &zone->lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &c->lock irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#3/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#3/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 &dentry->d_lock irq_context: 0 &type->s_umount_key#3/1 &dentry->d_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 proc_subdir_lock irq_context: 0 proc_subdir_lock irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 &type->s_umount_key#4/1 irq_context: 0 &type->s_umount_key#4/1 pool_lock#2 irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#4/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#4/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#4/1 sb_lock irq_context: 0 &type->s_umount_key#4/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 irq_context: 0 
&type->s_umount_key#4/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#4/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 &type->s_umount_key#4/1 &dentry->d_lock irq_context: 0 cgroup_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex pool_lock#2 irq_context: 0 cgroup_mutex lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cgroup_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex cgroup_file_kn_lock irq_context: 0 cgroup_mutex css_set_lock irq_context: 0 lock cgroup_idr_lock irq_context: 0 lock cgroup_idr_lock pool_lock#2 irq_context: 0 cpuset_mutex irq_context: 0 cpuset_mutex callback_lock irq_context: 0 cgroup_mutex &c->lock irq_context: 0 cgroup_mutex &____s->seqcount irq_context: 0 cgroup_mutex blkcg_pol_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &c->lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &pcp->lock &zone->lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &zone->lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &____s->seqcount irq_context: 0 cgroup_mutex lock cgroup_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &n->list_lock irq_context: 0 cgroup_mutex &pcp->lock &zone->lock irq_context: 0 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cgroup_mutex percpu_counters_lock irq_context: 0 cgroup_mutex shrinker_rwsem irq_context: 0 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex &base->lock irq_context: 0 cgroup_mutex &base->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 cgroup_mutex devcgroup_mutex irq_context: 0 cgroup_mutex &zone->lock irq_context: 0 cgroup_mutex cpu_hotplug_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 init_sighand.siglock irq_context: 0 init_files.file_lock irq_context: 0 lock pidmap_lock irq_context: 0 lock pidmap_lock pool_lock#2 irq_context: 0 pidmap_lock irq_context: 0 cgroup_threadgroup_rwsem irq_context: 0 cgroup_threadgroup_rwsem css_set_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_task.pi_lock irq_context: 0 
cgroup_threadgroup_rwsem tasklist_lock init_sighand.siglock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock irq_context: 0 &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &p->pi_lock irq_context: 0 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (kthreadd_done).wait.lock irq_context: 0 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock irq_context: 0 &p->alloc_lock irq_context: 0 &p->alloc_lock &____s->seqcount#2 irq_context: 0 fs_reclaim irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &pool->lock/1 irq_context: 0 wq_pool_mutex fs_reclaim irq_context: 0 wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex kthread_create_lock irq_context: 0 wq_pool_mutex &p->pi_lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_mutex &rq->__lock irq_context: 0 kthread_create_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 wq_pool_mutex &x->wait irq_context: 0 wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait irq_context: 0 &x->wait &p->pi_lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_mutex &obj_hash[i].lock irq_context: 0 wq_pool_attach_mutex irq_context: 0 wq_mayday_lock irq_context: 0 &xa->xa_lock irq_context: 0 &pool->lock irq_context: 0 &pool->lock &p->pi_lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&pool->mayday_timer) irq_context: 0 &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) &rnp->exp_poll_lock irq_context: 0 &pool->lock/1 irq_context: 0 &pool->lock/1 &p->pi_lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&wq_watchdog_timer) irq_context: 0 &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) allocation_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock irq_context: hardirq allocation_wait.lock &p->pi_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 batched_entropy_u8.lock irq_context: 0 kfence_freelist_lock irq_context: 0 rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.cbs_gbl_lock rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.cbs_gbl_lock rcu_tasks__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 rcu_tasks.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 
rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#3 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (null) irq_context: 0 (null) tk_core.seq.seqcount irq_context: softirq &(&ssp->srcu_sup->work)->timer irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) 
&x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex kernel/rcu/tasks.h:147 irq_context: softirq &(&kfence_timer)->timer irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&timer.timer) irq_context: softirq (&timer.timer) &p->pi_lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nmi_desc[0].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock smpboot_threads_lock batched_entropy_u8.lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kfence_freelist_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 
cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &c->lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &n->list_lock irq_context: 0 &rcu_state.gp_wq irq_context: 0 &stop_pi_lock irq_context: 0 &stop_pi_lock &rq->__lock irq_context: 0 &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &stopper->lock irq_context: 0 (module_notify_list).rwsem irq_context: 0 ddebug_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &obj_hash[i].lock irq_context: softirq rcu_callback pool_lock#2 irq_context: 0 &pmus_srcu irq_context: 0 watchdog_mutex irq_context: 0 watchdog_mutex cpu_hotplug_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 watchdog_mutex cpu_hotplug_lock &x->wait#4 irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)events irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#5 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &newf->file_lock irq_context: 0 init_fs.lock &dentry->d_lock irq_context: 0 &p->vtime.seqcount irq_context: 0 cpu_hotplug_lock mem_hotplug_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex 
rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.waiters.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpuset_hotplug_work irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &swhash->hlist_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock &cpuctx_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcp_batch_high_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &xa->xa_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 cpu_add_remove_lock cpu_hotplug_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock wq_pool_attach_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock relay_channels_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &c->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &n->list_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pcp->lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &____s->seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock free_vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock init_mm.page_table_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock (console_sem).lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rtc_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock tk_core.seq.seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock rtc_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &x->wait#6 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rq->__lock &rq->__lock/1 irq_context: 0 &rq->__lock/1 irq_context: 0 &x->wait#6 irq_context: 0 &x->wait#6 &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait#6 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock 
&p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock &irq_desc_lock_class irq_context: 0 cpu_hotplug_lock cpuhp_state-up &swhash->hlist_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock &cpuctx_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#5 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &x->wait#7 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &pool->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_node_0 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock rcu_read_lock &cfs_b->lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock cpu_hotplug_lock.waiters.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_add_remove_lock 
spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex (console_sem).lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_hotplug_lock stop_cpus_mutex irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 &x->wait#8 irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 sched_domains_mutex irq_context: 0 sched_domains_mutex fs_reclaim irq_context: 0 sched_domains_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sched_domains_mutex pool_lock#2 irq_context: 0 sched_domains_mutex &obj_hash[i].lock irq_context: 0 sched_domains_mutex pcpu_alloc_mutex irq_context: 0 sched_domains_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sched_domains_mutex &pcp->lock &zone->lock irq_context: 0 sched_domains_mutex &zone->lock irq_context: 0 sched_domains_mutex &____s->seqcount irq_context: 0 sched_domains_mutex rcu_read_lock pool_lock#2 irq_context: 0 sched_domains_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sched_domains_mutex &c->lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &cp->lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 sched_domains_mutex pcpu_lock irq_context: 0 slab_mutex fs_reclaim irq_context: 0 slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (memory_chain).rwsem irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 irq_context: 0 &type->s_umount_key#5/1 fs_reclaim irq_context: 0 &type->s_umount_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 pool_lock#2 irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#5/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#5/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#5/1 sb_lock irq_context: 0 &type->s_umount_key#5/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 
&type->s_umount_key#5/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#5/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#5/1 crngs.lock irq_context: 0 &type->s_umount_key#5/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->s_umount_key#5/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#5/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#5/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->s_umount_key#5/1 &dentry->d_lock irq_context: 0 (setup_done).wait.lock irq_context: 0 namespace_sem irq_context: 0 namespace_sem fs_reclaim irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 namespace_sem &pcp->lock &zone->lock irq_context: 0 namespace_sem &zone->lock irq_context: 0 namespace_sem &____s->seqcount irq_context: 0 namespace_sem pool_lock#2 irq_context: 0 namespace_sem &c->lock irq_context: 0 namespace_sem mnt_id_ida.xa_lock irq_context: 0 namespace_sem pcpu_alloc_mutex irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_lock irq_context: 0 namespace_sem &dentry->d_lock irq_context: 0 namespace_sem mount_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &p->alloc_lock init_fs.lock irq_context: 0 rcu_read_lock &____s->seqcount#3 irq_context: 0 file_systems_lock irq_context: 0 &type->s_umount_key#6 irq_context: 0 &type->s_umount_key#6 fs_reclaim irq_context: 0 &type->s_umount_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#6 pool_lock#2 irq_context: 0 &type->s_umount_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#6 &zone->lock irq_context: 0 &type->s_umount_key#6 &____s->seqcount irq_context: 0 &type->s_umount_key#6 &c->lock irq_context: 0 &type->s_umount_key#6 &lru->node[i].lock irq_context: 0 &type->s_umount_key#6 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#6 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key namespace_sem irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key namespace_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 
&sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 rcu_read_lock &____s->seqcount#4 irq_context: 0 &sb->s_type->i_lock_key#5 irq_context: 0 &fs->lock irq_context: 0 &fs->lock &____s->seqcount#3 irq_context: 0 (setup_done).wait.lock &p->pi_lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 req_lock irq_context: 0 of_mutex irq_context: 0 of_mutex fs_reclaim irq_context: 0 of_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 of_mutex pool_lock#2 irq_context: 0 of_mutex lock irq_context: 0 of_mutex lock kernfs_idr_lock irq_context: 0 of_mutex &root->kernfs_rwsem irq_context: 0 of_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &x->wait#9 irq_context: 0 &k->list_lock irq_context: 0 bus_type_sem irq_context: 0 &root->kernfs_rwsem irq_context: 0 &dev->power.lock irq_context: 0 dpm_list_mtx irq_context: 0 uevent_sock_mutex irq_context: 0 running_helpers_waitq.lock irq_context: 0 sysfs_symlink_target_lock irq_context: 0 &k->k_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex &dev->power.lock irq_context: 0 subsys mutex irq_context: 0 memory_blocks.xa_lock irq_context: 0 memory_blocks.xa_lock pool_lock#2 irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (console_sem).lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 lock kernfs_idr_lock &c->lock irq_context: 0 lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 lock kernfs_idr_lock &zone->lock irq_context: 0 lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock kernfs_idr_lock &____s->seqcount irq_context: 0 subsys mutex#2 irq_context: hardirq &rq->__lock &cfs_rq->removed.lock irq_context: 0 register_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock fs_reclaim irq_context: 0 register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock &c->lock irq_context: 0 register_lock &pcp->lock &zone->lock irq_context: 0 register_lock &zone->lock irq_context: 0 register_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock pool_lock#2 irq_context: 
0 register_lock proc_inum_ida.xa_lock &c->lock irq_context: 0 register_lock proc_inum_ida.xa_lock rcu_read_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock &obj_hash[i].lock irq_context: 0 register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (pm_chain_head).rwsem irq_context: 0 cpufreq_governor_mutex irq_context: 0 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dyn_event_ops_mutex irq_context: 0 binfmt_lock irq_context: 0 pin_fs_lock irq_context: 0 &type->s_umount_key#7/1 irq_context: 0 &type->s_umount_key#7/1 fs_reclaim irq_context: 0 &type->s_umount_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 pool_lock#2 irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#7/1 shrinker_rwsem irq_context: 0 
&type->s_umount_key#7/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#7/1 sb_lock irq_context: 0 &type->s_umount_key#7/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#7/1 &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &c->lock irq_context: 0 &type->s_umount_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 irq_context: 0 &type->s_umount_key#7/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#7/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#7/1 &dentry->d_lock irq_context: 0 rcu_read_lock mount_lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 irq_context: 0 &sb->s_type->i_mutex_key#2 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 chrdevs_lock irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 irq_context: 0 &type->s_umount_key#8/1 fs_reclaim irq_context: 0 &type->s_umount_key#8/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#8/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#8/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#8/1 sb_lock irq_context: 0 &type->s_umount_key#8/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#8/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 irq_context: 0 &type->s_umount_key#8/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#8/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &type->s_umount_key#8/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq 
irq_context: 0 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 subsys mutex#3 irq_context: 0 async_lock irq_context: 0 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->power.lock irq_context: 0 regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->devres_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_ww_class_mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex devtree_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex 
&sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_done.lock irq_context: 0 &type->s_umount_key#9/1 irq_context: 0 &type->s_umount_key#9/1 fs_reclaim irq_context: 0 &type->s_umount_key#9/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 pool_lock#2 irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#9/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#9/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#9/1 sb_lock irq_context: 0 &type->s_umount_key#9/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &c->lock irq_context: 0 &type->s_umount_key#9/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 irq_context: 0 &type->s_umount_key#9/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#9/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &type->s_umount_key#9/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pack_mutex irq_context: 0 pack_mutex fs_reclaim irq_context: 0 pack_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pack_mutex &pcp->lock &zone->lock irq_context: 0 pack_mutex &zone->lock irq_context: 0 pack_mutex &____s->seqcount irq_context: 0 pack_mutex pool_lock#2 irq_context: 0 pack_mutex free_vmap_area_lock irq_context: 0 pack_mutex vmap_area_lock irq_context: 0 pack_mutex init_mm.page_table_lock irq_context: 0 pack_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 
rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex rcu_read_lock rcu_node_0 irq_context: 0 pack_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex batched_entropy_u8.lock irq_context: 0 pack_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 pack_mutex kfence_freelist_lock irq_context: 0 pack_mutex vmap_purge_lock irq_context: 0 pack_mutex vmap_purge_lock purge_vmap_area_lock irq_context: 0 pack_mutex cpa_lock irq_context: 0 pack_mutex cpa_lock pgd_lock irq_context: 0 text_mutex irq_context: 0 text_mutex ptlock_ptr(page)#2 irq_context: 0 &fp->aux->used_maps_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 proto_list_mutex irq_context: 0 targets_mutex irq_context: 0 nl_table_lock irq_context: 0 nl_table_wait.lock irq_context: 0 net_family_lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &zone->lock irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 rtnl_mutex fs_reclaim irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock irq_context: 0 sparse_irq_lock fs_reclaim irq_context: 0 sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock irq_context: 0 sparse_irq_lock &root->kernfs_rwsem irq_context: 0 sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sparse_irq_lock &c->lock irq_context: 0 sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 sparse_irq_lock &zone->lock irq_context: 0 sparse_irq_lock &____s->seqcount irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &zone->lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 rcu_read_lock &stopper->lock irq_context: 0 rcu_read_lock &stop_pi_lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 freezer_lock irq_context: 0 audit_backlog_wait.lock irq_context: 0 kauditd_wait.lock irq_context: 0 &list->lock irq_context: 0 kauditd_wait.lock &p->pi_lock irq_context: 0 lock#2 irq_context: 0 lock#2 &zone->lock irq_context: 0 pcp_batch_high_lock irq_context: 0 khugepaged_mutex irq_context: 0 gdp_mutex irq_context: 0 gdp_mutex &k->list_lock irq_context: 0 gdp_mutex fs_reclaim irq_context: 0 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 gdp_mutex pool_lock#2 irq_context: 0 gdp_mutex lock irq_context: 0 gdp_mutex lock kernfs_idr_lock irq_context: 0 gdp_mutex &root->kernfs_rwsem irq_context: 0 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 subsys mutex#5 irq_context: 0 subsys mutex#5 &k->k_lock irq_context: 0 subsys mutex#6 irq_context: 0 subsys mutex#6 &k->list_lock irq_context: 0 subsys mutex#6 &k->k_lock irq_context: 0 regmap_debugfs_early_lock irq_context: 0 (acpi_reconfig_chain).rwsem irq_context: 0 __i2c_board_lock irq_context: 0 core_lock irq_context: 0 core_lock &k->list_lock irq_context: 0 core_lock &k->k_lock irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 nl_table_lock irq_context: 0 thermal_governor_lock irq_context: 0 thermal_governor_lock thermal_list_lock irq_context: 0 cpuidle_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpuidle_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock &obj_hash[i].lock irq_context: 0 cpuidle_lock (console_sem).lock irq_context: 0 cpuidle_lock console_lock console_srcu console_owner_lock irq_context: 0 cpuidle_lock console_lock console_srcu console_owner irq_context: 0 cpuidle_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpuidle_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_lock_key#8 irq_context: 0 &dir->lock irq_context: 0 k-sk_lock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR k-slock-AF_QIPCRTR irq_context: 0 k-slock-AF_QIPCRTR irq_context: 0 
k-sk_lock-AF_QIPCRTR fs_reclaim irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_ports.xa_lock irq_context: 0 k-sk_lock-AF_QIPCRTR pool_lock#2 irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_node_lock irq_context: 0 k-sk_lock-AF_QIPCRTR &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 crngs.lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 iova_cache_mutex irq_context: 0 iova_cache_mutex cpu_hotplug_lock irq_context: 0 iova_cache_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 iova_cache_mutex slab_mutex irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 iova_cache_mutex slab_mutex pool_lock#2 irq_context: 0 iova_cache_mutex slab_mutex &c->lock irq_context: 0 iova_cache_mutex slab_mutex &n->list_lock irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 subsys mutex#7 irq_context: 0 subsys mutex#7 &k->k_lock irq_context: 0 pci_config_lock irq_context: 0 device_links_lock irq_context: 0 subsys mutex#8 irq_context: 0 dev_pm_qos_mtx irq_context: 0 dev_pm_qos_mtx fs_reclaim irq_context: 0 dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_mtx pool_lock#2 irq_context: 0 dev_pm_qos_mtx &dev->power.lock irq_context: 0 dev_pm_qos_mtx pm_qos_lock irq_context: 0 dev_pm_qos_sysfs_mtx irq_context: 0 dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 dev_pm_qos_sysfs_mtx lock irq_context: 0 dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtrr_mutex irq_context: 0 mtrr_mutex fs_reclaim irq_context: 0 mtrr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtrr_mutex pool_lock#2 irq_context: 0 uidhash_lock irq_context: 0 &rq->__lock rcu_read_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex (console_sem).lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &disk->open_mutex bdev_lock irq_context: 0 &disk->open_mutex bdev_lock &bdev->bd_holder_lock irq_context: 0 &bdev->bd_fsfreeze_mutex irq_context: 0 oom_reaper_wait.lock irq_context: 0 subsys mutex#9 irq_context: 0 &pgdat->kcompactd_wait irq_context: 0 slab_mutex rcu_read_lock &pool->lock irq_context: 0 
slab_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &c->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 memory_tier_lock irq_context: 0 memory_tier_lock fs_reclaim irq_context: 0 memory_tier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 memory_tier_lock pool_lock#2 irq_context: 0 memory_tier_lock &x->wait#9 irq_context: 0 memory_tier_lock &obj_hash[i].lock irq_context: 0 memory_tier_lock &k->list_lock irq_context: 0 memory_tier_lock lock irq_context: 0 memory_tier_lock lock kernfs_idr_lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 memory_tier_lock bus_type_sem irq_context: 0 memory_tier_lock sysfs_symlink_target_lock irq_context: 0 memory_tier_lock &k->k_lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &____s->seqcount irq_context: 0 memory_tier_lock &pcp->lock &zone->lock irq_context: 0 memory_tier_lock &zone->lock irq_context: 0 memory_tier_lock rcu_read_lock pool_lock#2 irq_context: 0 memory_tier_lock &dev->power.lock irq_context: 0 memory_tier_lock dpm_list_mtx irq_context: 0 memory_tier_lock uevent_sock_mutex irq_context: 0 memory_tier_lock running_helpers_waitq.lock irq_context: 0 memory_tier_lock &dev->mutex &k->list_lock irq_context: 0 memory_tier_lock &dev->mutex &k->k_lock irq_context: 0 memory_tier_lock &dev->mutex &dev->power.lock irq_context: 0 memory_tier_lock subsys mutex#10 irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 memory_tier_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rq->__lock irq_context: hardirq &rcu_state.expedited_wq irq_context: 0 khugepaged_mutex fs_reclaim irq_context: 0 khugepaged_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 khugepaged_mutex pool_lock#2 irq_context: 0 khugepaged_mutex kthread_create_lock irq_context: 0 khugepaged_mutex &p->pi_lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 khugepaged_mutex &x->wait irq_context: 0 khugepaged_mutex &rq->__lock irq_context: 0 khugepaged_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ksm_thread_mutex irq_context: 0 ksm_thread_wait.lock irq_context: 0 khugepaged_mutex &obj_hash[i].lock irq_context: 0 khugepaged_mutex lock#2 irq_context: 0 khugepaged_mutex lock#2 &zone->lock irq_context: 0 khugepaged_mutex pcp_batch_high_lock irq_context: 0 damon_ops_lock irq_context: 0 crypto_alg_sem irq_context: 0 crypto_alg_sem (crypto_chain).rwsem irq_context: 0 cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock &wq->mutex irq_context: 0 cpu_hotplug_lock &wq->mutex &pool->lock irq_context: 0 cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock &x->wait irq_context: 0 cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 irq_context: 0 khugepaged_mm_lock irq_context: 0 khugepaged_wait.lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 quarantine_lock irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu quarantine_lock irq_context: 0 cpu_hotplug_lock &c->lock irq_context: 0 cpu_hotplug_lock &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock &zone->lock irq_context: 0 cpu_hotplug_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &wq->mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) wq_pool_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) pool_lock#2 irq_context: 0 bio_slab_lock irq_context: 0 bio_slab_lock fs_reclaim irq_context: 0 bio_slab_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex irq_context: 0 bio_slab_lock slab_mutex fs_reclaim irq_context: 0 bio_slab_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock slab_mutex pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex &c->lock irq_context: 0 bio_slab_lock slab_mutex &n->list_lock irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock pool_lock#2 irq_context: 0 major_names_lock irq_context: 0 major_names_lock fs_reclaim irq_context: 0 major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 major_names_lock pool_lock#2 irq_context: 0 major_names_lock major_names_spinlock irq_context: 0 console_lock fs_reclaim irq_context: 0 console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock &x->wait#9 irq_context: 0 console_lock &k->list_lock irq_context: 0 console_lock gdp_mutex irq_context: 0 console_lock gdp_mutex &k->list_lock irq_context: 0 console_lock gdp_mutex fs_reclaim irq_context: 0 console_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock gdp_mutex pool_lock#2 irq_context: 0 console_lock gdp_mutex lock irq_context: 0 console_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock lock irq_context: 0 console_lock lock kernfs_idr_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock bus_type_sem irq_context: 0 console_lock sysfs_symlink_target_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &dev->power.lock irq_context: 0 console_lock dpm_list_mtx irq_context: 0 console_lock uevent_sock_mutex irq_context: 0 console_lock running_helpers_waitq.lock irq_context: 0 console_lock subsys mutex#11 irq_context: 0 console_lock subsys mutex#11 &k->k_lock irq_context: 0 &bdev->bd_fsfreeze_mutex sb_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) (&timer.timer) irq_context: 0 rcu_tasks.tasks_gp_mutex (console_sem).lock irq_context: 0 rcu_tasks.tasks_gp_mutex 
console_owner_lock irq_context: 0 rcu_tasks.tasks_gp_mutex console_owner irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner_lock irq_context: 0 console_owner_lock irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner irq_context: 0 console_owner irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &meta->lock irq_context: 0 *(&acpi_gbl_hardware_lock) irq_context: 0 *(&acpi_gbl_gpe_lock) irq_context: 0 acpi_ioapic_lock ioapic_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 shrink_qlist.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) remove_cache_srcu_srcu_usage.lock irq_context: 0 &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock irq_context: 0 cpu_hotplug_lock flush_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 cpu_hotplug_lock flush_lock (work_completion)(&sfw->work) irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock (wq_completion)slub_flushwq irq_context: 0 cpu_hotplug_lock flush_lock &x->wait#10 irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)slub_flushwq irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &c->lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &n->list_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &obj_hash[i].lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &rq->__lock irq_context: 0 &n->list_lock &c->lock irq_context: 0 system_transition_mutex irq_context: 0 (power_off_prep_handler_list).rwsem irq_context: 0 power_off_handler_list.lock irq_context: 0 (restart_prep_handler_list).rwsem irq_context: 0 (reboot_notifier_list).rwsem irq_context: 0 *(&acpi_gbl_gpe_lock) (console_sem).lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner_lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner &port_lock_key irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock irq_context: 0 acpi_scan_lock semaphore->lock irq_context: 0 acpi_scan_lock fs_reclaim irq_context: 0 acpi_scan_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock pool_lock#2 irq_context: 0 acpi_scan_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &x->wait#9 irq_context: 0 acpi_scan_lock &c->lock irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock &zone->lock irq_context: 0 acpi_scan_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_device_lock irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_device_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock &xa->xa_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_device_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &k->list_lock irq_context: 0 acpi_scan_lock lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock bus_type_sem irq_context: 0 acpi_scan_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &k->k_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem 
irq_context: 0 acpi_scan_lock &dev->power.lock irq_context: 0 acpi_scan_lock dpm_list_mtx irq_context: 0 acpi_scan_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock subsys mutex#12 irq_context: 0 acpi_scan_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &n->list_lock irq_context: 0 acpi_scan_lock &n->list_lock &c->lock irq_context: 0 acpi_scan_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_device_lock &c->lock irq_context: 0 acpi_scan_lock acpi_device_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_device_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_device_lock &____s->seqcount irq_context: 0 acpi_scan_lock &obj_hash[i].lock pool_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 acpi_scan_lock &rq->__lock irq_context: 0 acpi_scan_lock batched_entropy_u8.lock irq_context: 0 acpi_scan_lock kfence_freelist_lock irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock pci_config_lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &meta->lock irq_context: 0 acpi_scan_lock acpi_device_lock quarantine_lock irq_context: 0 acpi_scan_lock (console_sem).lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_mmcfg_lock irq_context: 0 acpi_scan_lock resource_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &device->physical_node_lock pool_lock#2 irq_context: 0 acpi_scan_lock &device->physical_node_lock lock irq_context: 0 acpi_scan_lock &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock fwnode_link_lock irq_context: 0 acpi_scan_lock fwnode_link_lock &k->k_lock irq_context: 0 acpi_scan_lock devtree_lock irq_context: 0 acpi_scan_lock gdp_mutex irq_context: 0 acpi_scan_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock gdp_mutex lock irq_context: 0 acpi_scan_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock subsys mutex#13 irq_context: 0 acpi_scan_lock subsys mutex#13 &k->k_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_acpi_companion_lookup_sem irq_context: 0 
acpi_scan_lock pci_slot_mutex irq_context: 0 acpi_scan_lock tk_core.seq.seqcount irq_context: 0 acpi_scan_lock resource_alignment_lock irq_context: 0 acpi_scan_lock device_links_srcu irq_context: 0 acpi_scan_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 acpi_scan_lock subsys mutex#14 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &x->wait#9 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock bus_type_sem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock 
acpi_pm_notifier_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock events_lock irq_context: 0 &pgdat->kswapd_lock irq_context: softirq drivers/char/random.c:251 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (next_reseed).work irq_context: 0 (wq_completion)events_unbound (next_reseed).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work input_pool.lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work base_crng.lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &c->lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock pool_lock#2 irq_context: softirq mm/vmstat.c:2018 irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work &obj_hash[i].lock irq_context: 0 
(wq_completion)events (shepherd).work &base->lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock batched_entropy_u8.lock crngs.lock irq_context: 0 acpi_scan_lock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock subsys mutex#3 irq_context: 0 acpi_scan_lock acpi_link_lock irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_link_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_link_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_link_lock pci_config_lock irq_context: 0 acpi_scan_lock acpi_link_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_link_lock rcu_read_lock pool_lock#2 
irq_context: 0 acpi_scan_lock acpi_link_lock (console_sem).lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock &c->lock irq_context: 0 acpi_scan_lock acpi_dep_list_lock irq_context: 0 acpi_scan_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 acpi_scan_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 acpi_scan_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock subsys mutex#15 irq_context: 0 acpi_scan_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock events_lock irq_context: 0 acpi_scan_lock acpi_device_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_device_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock power_resource_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_device_lock irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 k-sk_lock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 k-slock-AF_NETLINK irq_context: 0 &type->s_umount_key#10/1 irq_context: 0 &type->s_umount_key#10/1 fs_reclaim irq_context: 0 &type->s_umount_key#10/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 pool_lock#2 irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#10/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#10/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#10/1 sb_lock irq_context: 0 &type->s_umount_key#10/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#10/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#10/1 &zone->lock irq_context: 0 &type->s_umount_key#10/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#10/1 &____s->seqcount irq_context: 0 &type->s_umount_key#10/1 &c->lock irq_context: 0 &type->s_umount_key#10/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 irq_context: 0 &type->s_umount_key#10/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#10/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#10/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 irq_context: 0 &type->s_umount_key#11/1 fs_reclaim irq_context: 0 &type->s_umount_key#11/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 pool_lock#2 irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#11/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#11/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#11/1 sb_lock irq_context: 0 &type->s_umount_key#11/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#11/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 irq_context: 0 &type->s_umount_key#11/1 &s->s_inode_list_lock irq_context: 0 
&type->s_umount_key#11/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 &dentry->d_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start dma_fence_map irq_context: 0 delayed_uprobe_lock irq_context: 0 key irq_context: 0 &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 attribute_container_mutex irq_context: 0 triggers_list_lock irq_context: 0 leds_list_lock irq_context: 0 bus_type_sem irq_context: 0 (usb_notifier_list).rwsem irq_context: 0 batched_entropy_u8.lock crngs.lock irq_context: 0 &device->physical_node_lock irq_context: 0 rc_map_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 fill_pool_map-wait-type-override pool_lock irq_context: 0 subsys mutex#16 irq_context: 0 resource_lock irq_context: 0 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 free_vmap_area_lock pool_lock#2 irq_context: 0 &entry->access irq_context: 0 info_mutex irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex fs_reclaim irq_context: 0 info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 info_mutex pool_lock#2 irq_context: 0 info_mutex proc_inum_ida.xa_lock irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex &c->lock irq_context: 0 info_mutex &____s->seqcount irq_context: 0 kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem 
rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex running_helpers_waitq.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &obj_hash[i].lock irq_context: 0 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock nl_table_lock irq_context: 0 rcu_read_lock nl_table_wait.lock irq_context: 0 qdisc_mod_lock irq_context: 0 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock &____s->seqcount irq_context: 0 bt_proto_lock irq_context: 0 hci_cb_list_lock irq_context: 0 mgmt_chan_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rate_ctrl_mutex irq_context: 0 rate_ctrl_mutex fs_reclaim irq_context: 0 rate_ctrl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rate_ctrl_mutex pool_lock#2 irq_context: 0 netlbl_domhsh_lock irq_context: 0 netlbl_unlhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock pool_lock#2 irq_context: 0 misc_mtx irq_context: 0 misc_mtx fs_reclaim irq_context: 0 misc_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx pool_lock#2 irq_context: 0 misc_mtx &x->wait#9 irq_context: 0 misc_mtx &obj_hash[i].lock irq_context: 0 misc_mtx &k->list_lock irq_context: 0 misc_mtx gdp_mutex irq_context: 0 misc_mtx gdp_mutex &k->list_lock irq_context: 0 misc_mtx gdp_mutex fs_reclaim 
irq_context: 0 misc_mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx gdp_mutex pool_lock#2 irq_context: 0 misc_mtx gdp_mutex &c->lock irq_context: 0 misc_mtx gdp_mutex &____s->seqcount irq_context: 0 misc_mtx gdp_mutex lock irq_context: 0 misc_mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx lock irq_context: 0 misc_mtx lock kernfs_idr_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx bus_type_sem irq_context: 0 misc_mtx sysfs_symlink_target_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &dev->power.lock irq_context: 0 misc_mtx dpm_list_mtx irq_context: 0 misc_mtx req_lock irq_context: 0 misc_mtx &p->pi_lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx &x->wait#11 irq_context: 0 misc_mtx &rq->__lock irq_context: 0 misc_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers irq_context: 0 misc_mtx rcu_read_lock &rq->__lock irq_context: 0 sb_writers mount_lock irq_context: 0 misc_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &x->wait#11 irq_context: 0 &x->wait#11 &p->pi_lock irq_context: 0 &x->wait#11 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx uevent_sock_mutex irq_context: 0 misc_mtx running_helpers_waitq.lock irq_context: 0 misc_mtx subsys mutex#18 irq_context: 0 misc_mtx subsys mutex#18 &k->k_lock irq_context: 0 rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock 
&pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex irq_context: 0 input_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex irq_context: 0 input_mutex input_devices_poll_wait.lock irq_context: 0 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &zone->lock irq_context: 0 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (netlink_chain).rwsem irq_context: 0 proto_tab_lock irq_context: 0 resource_lock pool_lock#2 irq_context: 0 resource_lock &obj_hash[i].lock irq_context: 0 random_ready_notifier.lock irq_context: 0 random_ready_notifier.lock crngs.lock irq_context: 0 misc_mtx misc_minors_ida.xa_lock irq_context: 0 misc_mtx &obj_hash[i].lock pool_lock irq_context: 0 misc_mtx lock kernfs_idr_lock &c->lock irq_context: 0 misc_mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 misc_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 misc_mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 misc_mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 vga_lock#2 irq_context: 0 vga_lock#2 pci_config_lock irq_context: 0 vga_lock#2 (console_sem).lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner_lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner irq_context: 0 vga_lock#2 console_lock console_srcu console_owner &port_lock_key irq_context: 0 vga_lock#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 disable_lock irq_context: 0 disable_lock fs_reclaim irq_context: 0 disable_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 disable_lock &c->lock irq_context: 0 disable_lock &____s->seqcount irq_context: 0 disable_lock pool_lock#2 irq_context: 0 disable_lock &x->wait#9 irq_context: 0 disable_lock &obj_hash[i].lock irq_context: 0 disable_lock &k->list_lock irq_context: 0 disable_lock lock irq_context: 0 disable_lock lock kernfs_idr_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 disable_lock bus_type_sem irq_context: 0 disable_lock sysfs_symlink_target_lock irq_context: 0 disable_lock &k->k_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &dev->power.lock irq_context: 0 disable_lock dpm_list_mtx irq_context: 0 disable_lock &(&priv->bus_notifier)->rwsem irq_context: 0 disable_lock uevent_sock_mutex irq_context: 0 disable_lock running_helpers_waitq.lock irq_context: 0 disable_lock &dev->mutex &dev->power.lock irq_context: 0 disable_lock &dev->mutex &k->list_lock irq_context: 0 disable_lock &dev->mutex &k->k_lock irq_context: 0 disable_lock subsys mutex#3 irq_context: 0 &type->s_umount_key#12/1 irq_context: 0 &type->s_umount_key#12/1 fs_reclaim irq_context: 0 &type->s_umount_key#12/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 pool_lock#2 irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#12/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#12/1 list_lrus_mutex 
irq_context: 0 &type->s_umount_key#12/1 sb_lock irq_context: 0 &type->s_umount_key#12/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#12/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 irq_context: 0 &type->s_umount_key#12/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#12/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#12/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#11 irq_context: 0 clocksource_mutex cpu_hotplug_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 clocksource_mutex (console_sem).lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner_lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner irq_context: 0 clocksource_mutex console_lock console_srcu console_owner &port_lock_key irq_context: hardirq tick_broadcast_lock irq_context: hardirq tick_broadcast_lock jiffies_lock irq_context: hardirq hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events timer_update_work irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 clocksource_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 &c->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 &____s->seqcount irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 list_lrus_mutex irq_context: 0 
&bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 sb_lock irq_context: 0 &type->s_umount_key#24/1 irq_context: 0 &type->s_umount_key#24/1 fs_reclaim irq_context: 0 &type->s_umount_key#24/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#24/1 &wq->mutex irq_context: 0 &type->s_umount_key#24/1 &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#24/1 kthread_create_lock irq_context: 0 &type->s_umount_key#24/1 &p->pi_lock irq_context: 0 &type->s_umount_key#24/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#24/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#24/1 &rq->__lock irq_context: 0 &type->s_umount_key#24/1 &x->wait irq_context: 0 &type->s_umount_key#24/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#24/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#24/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#24/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 &____s->seqcount irq_context: 0 &type->s_umount_key#24/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->s_umount_key#24/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#24/1 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 lock#4 irq_context: 0 &type->s_umount_key#24/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#24/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#24/1 &dd->lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#24/1 bit_wait_table + i irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq bit_wait_table + i irq_context: softirq bit_wait_table + i &p->pi_lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#24/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#24/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#24/1 batched_entropy_u8.lock irq_context: 0 &type->s_umount_key#24/1 kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) irq_context: 0 &type->s_umount_key#24/1 &wq->mutex &x->wait#10 irq_context: 0 &type->s_umount_key#24/1 wq_mayday_lock irq_context: 0 &type->s_umount_key#24/1 &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#24/1 &zone->lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 
wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#5 irq_context: 0 &sb->s_type->i_mutex_key#5 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#5 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#5 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 irq_context: 0 &sb->s_type->i_mutex_key#5 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#5 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#5 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#5 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#5 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#5 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#14/1 irq_context: 0 &type->s_umount_key#14/1 fs_reclaim irq_context: 0 &type->s_umount_key#14/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14/1 pool_lock#2 irq_context: 0 &type->s_umount_key#14/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#14/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#14/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#14/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#14/1 sb_lock irq_context: 0 &type->s_umount_key#14/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#14/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14/1 &sb->s_type->i_lock_key#13 irq_context: 0 &type->s_umount_key#14/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#14/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#14/1 &sb->s_type->i_lock_key#13 &dentry->d_lock irq_context: 0 &type->s_umount_key#14/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#15/1 irq_context: 0 &type->s_umount_key#15/1 fs_reclaim irq_context: 0 &type->s_umount_key#15/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#15/1 pool_lock#2 irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#15/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#15/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#15/1 sb_lock irq_context: 0 &type->s_umount_key#15/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 
&type->s_umount_key#15/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#14 irq_context: 0 &type->s_umount_key#15/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#15/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &type->s_umount_key#15/1 &dentry->d_lock irq_context: 0 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#16/1 irq_context: 0 &type->s_umount_key#16/1 fs_reclaim irq_context: 0 &type->s_umount_key#16/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#16/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#16/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#16/1 sb_lock irq_context: 0 &type->s_umount_key#16/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#16/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#15 irq_context: 0 &type->s_umount_key#16/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#16/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 &type->s_umount_key#16/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#15 irq_context: 0 bio_slab_lock slab_mutex &____s->seqcount irq_context: 0 kclist_lock irq_context: 0 kclist_lock resource_lock irq_context: 0 kclist_lock fs_reclaim irq_context: 0 kclist_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kclist_lock pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 irq_context: 0 &type->s_umount_key#17/1 fs_reclaim irq_context: 0 &type->s_umount_key#17/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#17/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#17/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#17/1 sb_lock irq_context: 0 &type->s_umount_key#17/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#17/1 &____s->seqcount irq_context: 0 &type->s_umount_key#17/1 &c->lock irq_context: 0 &type->s_umount_key#17/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#16 irq_context: 0 &type->s_umount_key#17/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#17/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &type->s_umount_key#17/1 &dentry->d_lock irq_context: 0 misc_mtx &c->lock irq_context: 0 misc_mtx &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &obj_hash[i].lock irq_context: 0 tomoyo_ss irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss tomoyo_policy_lock 
pool_lock#2 irq_context: 0 tomoyo_ss (console_sem).lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner_lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner irq_context: 0 tomoyo_ss console_lock console_srcu console_owner &port_lock_key irq_context: 0 tomoyo_ss console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#18/1 irq_context: 0 &type->s_umount_key#18/1 fs_reclaim irq_context: 0 &type->s_umount_key#18/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 pool_lock#2 irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#18/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#18/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#18/1 sb_lock irq_context: 0 &type->s_umount_key#18/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#18/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#17 irq_context: 0 &type->s_umount_key#18/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#18/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &type->s_umount_key#18/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &ns->lock irq_context: 0 &ns->lock &dentry->d_lock irq_context: 0 &ns->lock pin_fs_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 pool_lock#2 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock &wq irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &s->s_inode_list_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 tk_core.seq.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &c->lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &____s->seqcount irq_context: 0 &type->s_umount_key#19 irq_context: 0 &type->s_umount_key#19 sb_lock irq_context: 0 &type->s_umount_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 pnp_lock irq_context: 0 pnp_lock fs_reclaim irq_context: 0 pnp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pnp_lock pool_lock#2 irq_context: 0 &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 &device->physical_node_lock fs_reclaim irq_context: 0 &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &device->physical_node_lock pool_lock#2 irq_context: 0 &device->physical_node_lock lock irq_context: 0 &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem 
irq_context: 0 fwnode_link_lock irq_context: 0 fwnode_link_lock &k->k_lock irq_context: 0 subsys mutex#19 irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override &c->lock irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 subsys mutex#20 irq_context: 0 subsys mutex#20 &k->k_lock irq_context: 0 subsys mutex#21 irq_context: 0 subsys mutex#21 &k->k_lock irq_context: 0 subsys mutex#22 irq_context: 0 subsys mutex#22 &k->k_lock irq_context: 0 tty_mutex irq_context: 0 sb_writers &type->i_mutex_dir_key/1 quarantine_lock irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 fill_pool_map-wait-type-override &zone->lock irq_context: softirq led_lock irq_context: 0 misc_mtx &pcp->lock &zone->lock irq_context: 0 misc_mtx &zone->lock irq_context: 0 subsys mutex#23 irq_context: 0 subsys mutex#23 &k->list_lock irq_context: 0 subsys mutex#23 &k->k_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &xa->xa_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex kthread_create_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &x->wait irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 netevent_notif_chain.lock irq_context: 0 clients_rwsem irq_context: 0 clients_rwsem fs_reclaim irq_context: 0 clients_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 clients_rwsem clients.xa_lock irq_context: 0 devices_rwsem irq_context: 0 clients_rwsem clients.xa_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (blocking_lsm_notifier_chain).rwsem irq_context: 0 (inetaddr_chain).rwsem irq_context: 0 inet6addr_chain.lock irq_context: 0 buses_mutex irq_context: 0 offload_lock irq_context: 0 inetsw_lock irq_context: 0 (wq_completion)events_power_efficient irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: 0 ptype_lock irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem slab_mutex irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem slab_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem slab_mutex &c->lock irq_context: 0 pernet_ops_rwsem slab_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 tcp_ulp_list_lock irq_context: 0 xfrm_state_afinfo_lock irq_context: 0 xfrm_policy_afinfo_lock irq_context: 0 xfrm_input_afinfo_lock irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex krc.lock irq_context: 0 rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock &base->lock irq_context: 0 rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: hardirq rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &zone->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_highpri irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &____s->seqcount irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) pool_lock#2 irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) krc.lock irq_context: 0 &hashinfo->lock irq_context: 0 tcp_cong_list_lock 
irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (rpc_pipefs_notifier_list).rwsem irq_context: 0 svc_xprt_class_lock irq_context: 0 xprt_list_lock irq_context: 0 xprt_list_lock (console_sem).lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner_lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner irq_context: 0 xprt_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 xprt_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) cache_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock &obj_hash[i].lock irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: softirq (&tcp_orphan_timer) irq_context: softirq (&tcp_orphan_timer) &obj_hash[i].lock irq_context: softirq (&tcp_orphan_timer) &base->lock irq_context: softirq (&tcp_orphan_timer) &base->lock &obj_hash[i].lock irq_context: 0 pcibios_fwaddrmap_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock init_fs.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key 
rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 mount_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock pool_lock#2 irq_context: 0 umhelper_sem irq_context: 0 umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &pool->lock/1 &base->lock irq_context: 0 &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&sub_info->work) init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_files.file_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->alloc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &drv->dynids.lock irq_context: 0 umh_sysctl_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock pool_lock#2 
irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &sig->cred_guard_mutex irq_context: 0 &sig->cred_guard_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &fs->lock irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &zone->lock irq_context: 0 &sig->cred_guard_mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &____s->seqcount#3 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex pgd_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex key irq_context: 0 &sig->cred_guard_mutex pcpu_lock irq_context: 0 &sig->cred_guard_mutex percpu_counters_lock irq_context: 0 &tsk->futex_exit_mutex irq_context: 0 &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &p->alloc_lock &fs->lock irq_context: 0 &child->perf_event_mutex irq_context: 0 css_set_lock irq_context: 0 tasklist_lock irq_context: 0 tasklist_lock &pid->wait_pidfd irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit irq_context: 0 
tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 tasklist_lock &obj_hash[i].lock irq_context: 0 low_water_lock irq_context: 0 low_water_lock (console_sem).lock irq_context: 0 low_water_lock console_lock console_srcu console_owner_lock irq_context: 0 low_water_lock console_lock console_srcu console_owner irq_context: 0 low_water_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 low_water_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up &n->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up pool_lock#2 irq_context: 0 &x->wait#6 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex pool_lock#2 irq_context: 0 vendor_module_lock irq_context: 0 vendor_module_lock slab_mutex irq_context: 0 vendor_module_lock slab_mutex fs_reclaim irq_context: 0 vendor_module_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock slab_mutex pool_lock#2 irq_context: 0 vendor_module_lock slab_mutex &c->lock irq_context: 0 vendor_module_lock slab_mutex &n->list_lock 
irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock pcpu_alloc_mutex irq_context: 0 vendor_module_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock slab_mutex &____s->seqcount irq_context: 0 vendor_module_lock &obj_hash[i].lock irq_context: 0 vendor_module_lock percpu_counters_lock irq_context: 0 vendor_module_lock fs_reclaim irq_context: 0 vendor_module_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock pool_lock#2 irq_context: 0 vendor_module_lock shrinker_rwsem irq_context: 0 vendor_module_lock &pcp->lock &zone->lock irq_context: 0 vendor_module_lock &zone->lock irq_context: 0 vendor_module_lock &____s->seqcount irq_context: 0 vendor_module_lock cpu_hotplug_lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: softirq &(&cache_cleaner)->timer irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vendor_module_lock timekeeper_lock irq_context: 0 vendor_module_lock timekeeper_lock pvclock_gtod_data irq_context: 0 misc_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock irq_context: hardirq timekeeper_lock tk_core.seq.seqcount pvclock_gtod_data irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock pool_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#9 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up bus_type_sem irq_context: 0 cpu_hotplug_lock cpuhp_state-up sysfs_symlink_target_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up dpm_list_mtx irq_context: 0 cpu_hotplug_lock cpuhp_state-up req_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#11 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock &wq irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#5 irq_context: 0 cpu_hotplug_lock cpuhp_state-up uevent_sock_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up running_helpers_waitq.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock pool_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up fill_pool_map-wait-type-override pool_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &rq->__lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys 
mutex#25 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#25 &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 crypto_alg_sem irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &____s->seqcount irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pm_qos_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock pool_lock irq_context: 0 subsys mutex#26 irq_context: 0 subsys mutex#27 irq_context: 0 subsys mutex#27 &k->list_lock irq_context: 0 subsys mutex#27 &k->k_lock irq_context: 0 subsys mutex#28 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 tasklist_lock &c->lock irq_context: softirq rcu_callback quarantine_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) irq_context: 0 (wq_completion)events (work_completion)(&p->wq) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) pool_lock#2 irq_context: 0 trace_event_sem trace_event_sem.wait_lock irq_context: 0 trace_event_sem &rq->__lock irq_context: 0 trace_event_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 jiffies_seq.seqcount irq_context: softirq rcu_callback &obj_hash[i].lock pool_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pool_lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); }))->timer irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq mm/memcontrol.c:589 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) irq_context: 0 (wq_completion)events 
(work_completion)(&(&group->avgs_work)->work) &group->avgs_lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&krcp->monitor_work)->timer irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&tbl->managed_work)->timer irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 &sbi->old_work_lock irq_context: 0 &type->s_umount_key#24/1 (work_completion)(&(&sbi->old_work)->work) irq_context: 0 &type->s_umount_key#24/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#24/1 sb_lock irq_context: 0 &disk->open_mutex &meta->lock irq_context: 0 &disk->open_mutex kfence_freelist_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &xa->xa_lock#13 irq_context: 0 sb_lock &obj_hash[i].lock irq_context: 0 sb_lock pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &c->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &____s->seqcount irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 sb_lock 
irq_context: 0 &type->s_umount_key#25/1 irq_context: 0 &type->s_umount_key#25/1 fs_reclaim irq_context: 0 &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#25/1 &zone->lock irq_context: 0 &type->s_umount_key#25/1 &____s->seqcount irq_context: 0 &type->s_umount_key#25/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#25/1 lock#4 irq_context: 0 &type->s_umount_key#25/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#25/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#25/1 &dd->lock irq_context: 0 &type->s_umount_key#25/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#25/1 &rq->__lock irq_context: 0 &type->s_umount_key#25/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&timer.timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#25/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#25/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#25/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#25/1 lock#5 irq_context: 0 pmus_lock fs_reclaim irq_context: 0 &type->s_umount_key#25/1 &lruvec->lru_lock irq_context: 0 pmus_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 crypto_alg_sem irq_context: 0 pmus_lock &k->list_lock irq_context: 0 pmus_lock lock irq_context: 0 pmus_lock lock kernfs_idr_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pmus_lock uevent_sock_mutex irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pmus_lock running_helpers_waitq.lock irq_context: 0 pmus_lock &x->wait#9 irq_context: 0 pmus_lock bus_type_sem irq_context: 0 pmus_lock &c->lock irq_context: 0 pmus_lock &____s->seqcount irq_context: 0 pmus_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 pmus_lock sysfs_symlink_target_lock irq_context: 0 pmus_lock &k->k_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &dev->power.lock irq_context: 0 pmus_lock dpm_list_mtx irq_context: 0 pmus_lock &dev->mutex &k->list_lock irq_context: 0 pmus_lock &dev->mutex &k->k_lock irq_context: 0 pmus_lock &dev->mutex &dev->power.lock irq_context: 0 pmus_lock subsys mutex#29 irq_context: 0 
pmus_lock &pcp->lock &zone->lock irq_context: 0 pmus_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#25/1 lock#3 irq_context: 0 &type->s_umount_key#25/1 lock#3 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 lock#3 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#25/1 lock#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 lock#3 &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#5 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 lock#3 (work_completion)(work) irq_context: 0 &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#25/1 sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 sb_lock irq_context: 0 &type->s_umount_key#26/1 irq_context: 0 key_user_lock irq_context: 0 crngs.lock base_crng.lock irq_context: 0 key_serial_lock irq_context: 0 key_construction_mutex irq_context: 0 &type->lock_class irq_context: 0 &type->lock_class keyring_serialise_link_lock irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 keyring_serialise_link_lock irq_context: 0 &pgdat->kswapd_lock fs_reclaim irq_context: 0 &pgdat->kswapd_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pgdat->kswapd_lock pool_lock#2 irq_context: 0 &pgdat->kswapd_lock kthread_create_lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pgdat->kswapd_lock &x->wait irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock irq_context: 0 &pgdat->kswapd_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock 
irq_context: 0 &pgdat->kswapd_lock &obj_hash[i].lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock quarantine_lock irq_context: 0 &pgdat->kswapd_wait irq_context: 0 list_lrus_mutex irq_context: 0 drivers_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock irq_context: 0 damon_dbgfs_lock fs_reclaim irq_context: 0 damon_dbgfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock pool_lock#2 irq_context: 0 damon_dbgfs_lock tk_core.seq.seqcount irq_context: 0 damon_dbgfs_lock damon_ops_lock irq_context: 0 damon_dbgfs_lock pin_fs_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &type->s_umount_key#20/1 irq_context: 0 &type->s_umount_key#20/1 fs_reclaim irq_context: 0 &type->s_umount_key#20/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#20/1 pool_lock#2 irq_context: 0 &type->s_umount_key#20/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#20/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#20/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#20/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#20/1 sb_lock irq_context: 0 &type->s_umount_key#20/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#20/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#20/1 &sb->s_type->i_lock_key#18 irq_context: 0 &type->s_umount_key#20/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#20/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#20/1 &sb->s_type->i_lock_key#18 &dentry->d_lock irq_context: 0 &type->s_umount_key#20/1 &dentry->d_lock irq_context: 0 dq_list_lock irq_context: 0 &type->s_umount_key#21/1 irq_context: 0 &type->s_umount_key#21/1 fs_reclaim irq_context: 0 &type->s_umount_key#21/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 pool_lock#2 irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#21/1 shrinker_rwsem irq_context: 0 
&type->s_umount_key#21/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#21/1 &c->lock irq_context: 0 &type->s_umount_key#21/1 &____s->seqcount irq_context: 0 &type->s_umount_key#21/1 sb_lock irq_context: 0 &type->s_umount_key#21/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#21/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#19 irq_context: 0 &type->s_umount_key#21/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#21/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &type->s_umount_key#21/1 &dentry->d_lock irq_context: 0 configfs_subsystem_mutex irq_context: 0 &sb->s_type->i_mutex_key#7/1 irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 irq_context: 0 misc_mtx rcu_read_lock pool_lock#2 irq_context: 0 pcpu_alloc_mutex &rq->__lock irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 ecryptfs_daemon_hash_mux irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ecryptfs_daemon_hash_mux pool_lock#2 irq_context: 0 ecryptfs_msg_ctx_lists_mux irq_context: 0 ecryptfs_msg_ctx_lists_mux &ecryptfs_msg_ctx_arr[i].mux irq_context: 0 &ecryptfs_kthread_ctl.wait irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem running_helpers_waitq.lock irq_context: 0 nfs_version_lock irq_context: 0 key_types_sem irq_context: 0 key_types_sem (console_sem).lock irq_context: 0 key_types_sem console_lock console_srcu console_owner_lock irq_context: 0 key_types_sem console_lock console_srcu console_owner irq_context: 0 key_types_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 key_types_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pnfs_spinlock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 
slab_mutex batched_entropy_u8.lock irq_context: 0 slab_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 slab_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 nls_lock irq_context: 0 key_types_sem &rq->__lock irq_context: 0 jffs2_compressor_list_lock irq_context: 0 misc_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 next_tag_value_lock irq_context: 0 log_redrive_lock irq_context: 0 &TxAnchor.LazyLock irq_context: 0 &TxAnchor.LazyLock jfs_commit_thread_wait.lock irq_context: 0 jfsTxnLock irq_context: 0 ocfs2_stack_lock irq_context: 0 ocfs2_stack_lock (console_sem).lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner_lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 o2hb_callback_sem irq_context: 0 &sb->s_type->i_mutex_key#7/1 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &zone->lock irq_context: 0 o2net_handler_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &type->s_umount_key#22/1 irq_context: 0 &type->s_umount_key#22/1 fs_reclaim irq_context: 0 &type->s_umount_key#22/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#22/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#22/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#22/1 sb_lock irq_context: 0 &type->s_umount_key#22/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#22/1 &____s->seqcount irq_context: 0 &type->s_umount_key#22/1 &c->lock irq_context: 0 &type->s_umount_key#22/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#20 irq_context: 0 &type->s_umount_key#22/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#22/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#22/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex 
irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 alg_types_sem irq_context: 0 alg_types_sem fs_reclaim irq_context: 0 alg_types_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 alg_types_sem batched_entropy_u8.lock irq_context: 0 alg_types_sem kfence_freelist_lock irq_context: 0 alg_types_sem pool_lock#2 irq_context: 0 dma_list_mutex irq_context: 0 asymmetric_key_parsers_sem irq_context: 0 asymmetric_key_parsers_sem (console_sem).lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner_lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 blkcg_pol_register_mutex irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex cgroup_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock kernfs_idr_lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 elv_list_lock irq_context: 0 crc_t10dif_mutex irq_context: 0 crc_t10dif_mutex crypto_alg_sem irq_context: 0 crc_t10dif_mutex fs_reclaim irq_context: 0 crc_t10dif_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc_t10dif_mutex &pcp->lock &zone->lock irq_context: 0 crc_t10dif_mutex &zone->lock irq_context: 0 crc_t10dif_mutex &____s->seqcount irq_context: 0 crc_t10dif_mutex pool_lock#2 irq_context: 0 crc_t10dif_mutex rcu_read_lock pool_lock#2 irq_context: 0 crc_t10dif_mutex &obj_hash[i].lock irq_context: 0 crc64_rocksoft_mutex irq_context: 0 crc64_rocksoft_mutex crypto_alg_sem irq_context: 0 crc64_rocksoft_mutex fs_reclaim irq_context: 0 crc64_rocksoft_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc64_rocksoft_mutex pool_lock#2 irq_context: 0 ts_mod_lock irq_context: 0 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex device_links_lock irq_context: 0 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex lock irq_context: 0 &dev->mutex lock 
kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &c->lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &zone->lock irq_context: 0 &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex gdp_mutex irq_context: 0 &dev->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex gdp_mutex lock irq_context: 0 &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex bus_type_sem irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex &rq->__lock irq_context: 0 &dev->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex subsys mutex#30 irq_context: 0 &dev->mutex subsys mutex#30 &k->k_lock irq_context: 0 &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: softirq rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex input_mutex irq_context: 0 &dev->mutex input_mutex fs_reclaim irq_context: 0 &dev->mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex kfence_freelist_lock irq_context: 0 &dev->mutex input_mutex pool_lock#2 irq_context: 0 &dev->mutex input_mutex &dev->mutex#2 irq_context: 0 &dev->mutex input_mutex input_devices_poll_wait.lock irq_context: 0 &dev->mutex semaphore->lock irq_context: 0 &dev->mutex *(&acpi_gbl_hardware_lock) irq_context: 0 &dev->mutex wakeup_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex events_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &dev->mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu irq_context: 0 
&dev->mutex wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex wakeup_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) wakeup_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &x->wait#3 irq_context: 0 &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex wakeup_srcu_srcu_usage.lock irq_context: softirq rcu_callback &meta->lock irq_context: softirq rcu_callback kfence_freelist_lock irq_context: 0 &dev->mutex (&ws->timer) irq_context: 0 &dev->mutex &base->lock irq_context: 0 &dev->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &zone->lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 &dev->mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex kernfs_idr_lock irq_context: 0 &dev->mutex &ws->lock irq_context: 0 &dev->mutex deleted_ws.lock irq_context: 0 &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex gdp_mutex &c->lock irq_context: 0 &dev->mutex gdp_mutex &____s->seqcount irq_context: 0 register_count_mutex irq_context: 0 register_count_mutex &k->list_lock irq_context: 0 register_count_mutex fs_reclaim irq_context: 0 register_count_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_count_mutex pool_lock#2 irq_context: 0 register_count_mutex lock irq_context: 0 register_count_mutex lock kernfs_idr_lock irq_context: 0 register_count_mutex &root->kernfs_rwsem irq_context: 0 register_count_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_count_mutex &k->k_lock irq_context: 0 register_count_mutex &c->lock irq_context: 0 register_count_mutex &____s->seqcount irq_context: 0 register_count_mutex uevent_sock_mutex irq_context: 0 register_count_mutex 
&obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_count_mutex running_helpers_waitq.lock irq_context: 0 register_count_mutex &rq->__lock irq_context: 0 (cpufreq_policy_notifier_list).rwsem irq_context: 0 &dev->mutex cpu_add_remove_lock irq_context: 0 &dev->mutex tick_broadcast_lock irq_context: 0 &dev->mutex cpuidle_driver_lock irq_context: 0 &dev->mutex cpuidle_lock irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpuidle_lock pool_lock#2 irq_context: 0 &dev->mutex cpuidle_lock lock irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex cpuidle_lock &c->lock irq_context: 0 &dev->mutex cpuidle_lock &____s->seqcount irq_context: 0 &dev->mutex thermal_cdev_ida.xa_lock irq_context: 0 &dev->mutex cpufreq_driver_lock irq_context: 0 &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex subsys mutex#31 irq_context: 0 &dev->mutex subsys mutex#31 &k->k_lock irq_context: 0 &dev->mutex thermal_list_lock irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex.wait_lock irq_context: 0 (x86_mce_decoder_chain).rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) running_helpers_waitq.lock irq_context: 0 pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &drv->dynids.lock irq_context: 0 &dev->mutex pci_config_lock irq_context: 0 &dev->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_link_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock semaphore->lock irq_context: 0 &dev->mutex acpi_link_lock &obj_hash[i].lock irq_context: 0 &dev->mutex acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock &____s->seqcount irq_context: 0 &dev->mutex acpi_link_lock &c->lock irq_context: 0 &dev->mutex acpi_link_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock pci_config_lock irq_context: 0 &dev->mutex acpi_link_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex acpi_link_lock (console_sem).lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex acpi_link_lock &rq->__lock irq_context: 0 &dev->mutex acpi_ioapic_lock irq_context: 0 &dev->mutex acpi_ioapic_lock ioapic_mutex irq_context: 0 &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex virtio_index_ida.xa_lock irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#32 irq_context: 0 &dev->mutex fwnode_link_lock &k->k_lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &md->mutex irq_context: 0 &dev->mutex &md->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex memtype_lock irq_context: 0 &dev->mutex free_vmap_area_lock irq_context: 0 &dev->mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &dev->mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &dev->mutex vmap_area_lock irq_context: 0 &dev->mutex 
&md->mutex pci_config_lock irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#4 irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#4 pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex lock irq_context: 0 &dev->mutex &md->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &domain->mutex &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &____s->seqcount irq_context: 0 &dev->mutex &desc->request_mutex irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &dev->mutex register_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock fs_reclaim irq_context: 0 &dev->mutex register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_lock pool_lock#2 irq_context: 0 &dev->mutex register_lock proc_inum_ida.xa_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock batched_entropy_u8.lock irq_context: 0 &dev->mutex register_lock kfence_freelist_lock irq_context: 0 &dev->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex &zone->lock &____s->seqcount irq_context: 0 &dev->mutex 
&dev->vqs_list_lock irq_context: 0 &dev->mutex &vp_dev->lock irq_context: 0 &dev->mutex register_lock &c->lock irq_context: 0 &dev->mutex register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_lock &zone->lock irq_context: 0 &dev->mutex register_lock &____s->seqcount irq_context: 0 &dev->mutex cpu_hotplug_lock irq_context: 0 &dev->mutex &s->s_inode_list_lock irq_context: 0 &dev->mutex (oom_notify_list).rwsem irq_context: 0 &dev->mutex &dev->config_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 vdpa_dev_lock irq_context: 0 subsys mutex#33 irq_context: 0 subsys mutex#33 &k->k_lock irq_context: 0 &sig->cred_guard_mutex quarantine_lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex pool_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: hardirq &vb->stop_update_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_freezable irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &s->s_inode_list_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &n->list_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 fs_reclaim &rq->__lock irq_context: 0 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim 
mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &cfs_rq->removed.lock irq_context: 0 &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&p->wq) quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex pnp_lock irq_context: 0 &dev->mutex serial_mutex irq_context: 0 &dev->mutex serial_mutex gpio_lookup_lock irq_context: 0 &dev->mutex serial_mutex port_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &x->wait#9 irq_context: 0 &dev->mutex serial_mutex port_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex bus_type_sem irq_context: 0 &dev->mutex serial_mutex port_mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex 
sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex dpm_list_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex serial_mutex port_mutex subsys mutex#34 irq_context: 0 &dev->mutex serial_mutex port_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &rq->__lock irq_context: 0 (wq_completion)pm irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex resource_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex (console_sem).lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex ctrl_ida.xa_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#9 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem 
irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex bus_type_sem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dpm_list_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#35 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex semaphore->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex deferred_probe_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex device_links_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex 
serial_mutex port_mutex &port->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex req_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#11 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex chrdevs_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 misc_mtx &cfs_rq->removed.lock irq_context: 0 &dev->mutex rng_index_ida.xa_lock irq_context: hardirq &x->wait#12 irq_context: 0 &dev->mutex rng_mutex irq_context: 0 &dev->mutex rng_mutex &x->wait#13 irq_context: 0 &dev->mutex rng_mutex fs_reclaim irq_context: 0 &dev->mutex rng_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rng_mutex pool_lock#2 irq_context: 0 &dev->mutex rng_mutex kthread_create_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rng_mutex 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rng_mutex &rq->__lock irq_context: 0 &dev->mutex rng_mutex &x->wait irq_context: 0 &dev->mutex rng_mutex &obj_hash[i].lock irq_context: 0 rng_mutex irq_context: 0 reading_mutex irq_context: 0 &dev->mutex reading_mutex irq_context: 0 &dev->mutex reading_mutex reading_mutex.wait_lock irq_context: 0 &dev->mutex input_pool.lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 klist_remove_lock irq_context: 0 &k->k_lock klist_remove_lock irq_context: 0 kernfs_idr_lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->devres_lock irq_context: 0 &dev->managed.lock irq_context: 0 &type->s_umount_key#23/1 irq_context: 0 &type->s_umount_key#23/1 fs_reclaim irq_context: 0 &type->s_umount_key#23/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#23/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#23/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#23/1 sb_lock irq_context: 0 &type->s_umount_key#23/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#23/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#21 irq_context: 0 &type->s_umount_key#23/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#23/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#21 &dentry->d_lock irq_context: 0 &type->s_umount_key#23/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#21 irq_context: 0 lock drm_minor_lock irq_context: 0 lock drm_minor_lock pool_lock#2 irq_context: 0 stack_depot_init_mutex irq_context: 0 &dev->debugfs_mutex irq_context: 0 subsys mutex#36 irq_context: 0 subsys mutex#36 &k->k_lock irq_context: 0 drm_minor_lock irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: hardirq &rt_b->rt_runtime_lock irq_context: hardirq &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: hardirq &rt_rq->rt_runtime_lock irq_context: 0 (worker)->lock irq_context: 0 &rq->__lock &rt_rq->rt_runtime_lock irq_context: 0 &dev->mode_config.idr_mutex irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mode_config.idr_mutex pool_lock#2 irq_context: 0 crtc_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &dev->mode_config.blob_lock irq_context: 0 &xa->xa_lock#5 irq_context: 0 &xa->xa_lock#6 irq_context: 0 &dev->mode_config.connector_list_lock irq_context: 0 &dev->vbl_lock irq_context: 0 drm_connector_list_iter 
&dev->mode_config.connector_list_lock irq_context: 0 drm_connector_list_iter fs_reclaim irq_context: 0 drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &x->wait#9 irq_context: 0 drm_connector_list_iter &connector->mutex &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->list_lock irq_context: 0 drm_connector_list_iter &connector->mutex lock irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex bus_type_sem irq_context: 0 drm_connector_list_iter &connector->mutex &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &pcp->lock &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex sysfs_symlink_target_lock irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &dev->power.lock irq_context: 0 drm_connector_list_iter &connector->mutex dpm_list_mtx irq_context: 0 drm_connector_list_iter &connector->mutex uevent_sock_mutex irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 drm_connector_list_iter &connector->mutex running_helpers_waitq.lock irq_context: 0 drm_connector_list_iter &connector->mutex &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#36 irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#36 &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex pin_fs_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex 
&sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &dev->mode_config.idr_mutex irq_context: 0 drm_connector_list_iter &connector->mutex &cfs_rq->removed.lock irq_context: 0 drm_connector_list_iter &connector->mutex connector_list_lock irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->filelist_mutex irq_context: 0 &dev->clientlist_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex 
&dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &zone->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex &c->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &c->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &sb->s_type->i_lock_key irq_context: 0 &dev->clientlist_mutex &helper->lock &s->s_inode_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock batched_entropy_u32.lock irq_context: 0 &dev->clientlist_mutex &helper->lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &mgr->vm_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &mgr->vm_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock &file_private->table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock &file_private->table_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->mode_config.idr_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->mode_config.fb_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file->fbs_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &prime_fpriv->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock free_vmap_area_lock irq_context: 0 &dev->clientlist_mutex &helper->lock vmap_area_lock irq_context: 0 &dev->clientlist_mutex 
&helper->lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &zone->lock irq_context: 0 &dev->clientlist_mutex &helper->lock init_mm.page_table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock irq_context: 0 &dev->clientlist_mutex registration_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock &x->wait#9 irq_context: 0 &dev->clientlist_mutex registration_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock lock irq_context: 0 &dev->clientlist_mutex registration_lock lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock bus_type_sem irq_context: 0 &dev->clientlist_mutex registration_lock sysfs_symlink_target_lock irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock &dev->power.lock irq_context: 0 &dev->clientlist_mutex registration_lock dpm_list_mtx irq_context: 0 &dev->clientlist_mutex registration_lock req_lock irq_context: 0 &dev->clientlist_mutex registration_lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock &x->wait#11 irq_context: 0 &dev->clientlist_mutex registration_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock uevent_sock_mutex irq_context: 0 &dev->clientlist_mutex registration_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock running_helpers_waitq.lock irq_context: 0 &dev->clientlist_mutex 
registration_lock &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock subsys mutex#11 irq_context: 0 &dev->clientlist_mutex registration_lock subsys mutex#11 &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock (console_sem).lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &fb_info->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &base->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &base->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &x->wait#9 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock gdp_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock gdp_mutex &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock bus_type_sem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock sysfs_symlink_target_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &dev->power.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock dpm_list_mtx irq_context: 0 &dev->clientlist_mutex registration_lock console_lock uevent_sock_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock 
running_helpers_waitq.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock subsys mutex#5 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock subsys mutex#5 &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock vga_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.idr_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.blob_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &crtc->commit_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &zone->lock irq_context: 0 
&dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &info->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 
&dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 &lruvec->lru_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock 
console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock free_vmap_area_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock vmap_area_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock init_mm.page_table_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire 
crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &x->wait#14 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work) irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->damage_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->damage_lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex 
&client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vblank_time_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vkms_out->lock irq_context: hardirq &vkms_out->lock &dev->event_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &vblank->queue irq_context: hardirq &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire 
crtc_ww_class_mutex (&timer.timer) irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work)#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &lock->wait_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock vt_event_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock (console_sem).lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_owner_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex (console_sem).lock irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->clientlist_mutex kernel_fb_helper_lock irq_context: 0 &dev->queue_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock pool_lock#2 irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &set->tag_list_lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock bio_slabs.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock bio_slabs.xa_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 blk_queue_ida.xa_lock irq_context: 0 &sb->s_type->i_lock_key#3 irq_context: 0 &xa->xa_lock#9 irq_context: 0 lock &q->queue_lock irq_context: 0 lock &q->queue_lock &blkcg->lock irq_context: 0 &q->queue_lock irq_context: 0 &q->queue_lock pool_lock#2 irq_context: 0 &q->queue_lock pcpu_lock irq_context: 0 &q->queue_lock &obj_hash[i].lock irq_context: 0 &q->queue_lock percpu_counters_lock irq_context: 0 &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock irq_context: 0 subsys mutex#37 irq_context: 0 subsys mutex#37 &k->k_lock irq_context: 0 dev_hotplug_mutex irq_context: 0 dev_hotplug_mutex &dev->power.lock irq_context: 0 &q->sysfs_dir_lock irq_context: 0 &q->sysfs_dir_lock fs_reclaim irq_context: 0 &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex &rq->__lock irq_context: 0 &q->sysfs_dir_lock batched_entropy_u8.lock irq_context: 0 &q->sysfs_dir_lock kfence_freelist_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 
percpu_ref_switch_lock irq_context: 0 subsys mutex#38 irq_context: 0 subsys mutex#38 &k->k_lock irq_context: 0 cgwb_lock irq_context: 0 bdi_lock irq_context: 0 inode_hash_lock irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 bdev_lock irq_context: 0 &disk->open_mutex irq_context: 0 &disk->open_mutex fs_reclaim irq_context: 0 &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex pool_lock#2 irq_context: 0 &disk->open_mutex free_vmap_area_lock irq_context: 0 &disk->open_mutex vmap_area_lock irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex &zone->lock irq_context: 0 &disk->open_mutex &____s->seqcount irq_context: 0 &disk->open_mutex init_mm.page_table_lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 irq_context: 0 &disk->open_mutex lock#4 irq_context: 0 &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex &c->lock irq_context: 0 &disk->open_mutex &mapping->private_lock irq_context: 0 &disk->open_mutex tk_core.seq.seqcount irq_context: 0 &disk->open_mutex &ret->b_uptodate_lock irq_context: 0 &disk->open_mutex &obj_hash[i].lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 pool_lock#2 irq_context: 0 &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 &disk->open_mutex purge_vmap_area_lock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock major_names_spinlock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &disk->open_mutex lock#4 &lruvec->lru_lock irq_context: 0 &disk->open_mutex lock#5 irq_context: 0 &disk->open_mutex &lruvec->lru_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rtc_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) register_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) resource_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &c->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &____s->seqcount irq_context: 0 &disk->open_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &q->queue_lock &c->lock irq_context: 0 &q->queue_lock &pcp->lock &zone->lock irq_context: 0 &q->queue_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->queue_lock &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &c->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) command_done.lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex irq_context: 0 (wq_completion)events 
(work_completion)(&blkg->free_work) &q->blkcg_mutex &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 &q->queue_lock &zone->lock irq_context: 0 loop_ctl_mutex irq_context: 0 loop_ctl_mutex fs_reclaim irq_context: 0 loop_ctl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 loop_ctl_mutex pool_lock#2 irq_context: 0 &q->sysfs_lock irq_context: 0 &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_lock &n->list_lock irq_context: 0 &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_lock &zone->lock irq_context: 0 &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_lock cpu_hotplug_lock irq_context: 0 &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 &q->sysfs_lock fs_reclaim irq_context: 0 &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock &xa->xa_lock#8 irq_context: 0 &set->tag_list_lock irq_context: 0 &q->mq_freeze_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex percpu_ref_switch_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex pin_fs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim 
irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: softirq &(&ops->cursor_work)->timer irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) (console_sem).lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock &helper->damage_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) 
purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &c->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock irq_context: 0 nbd_index_mutex irq_context: 0 nbd_index_mutex fs_reclaim irq_context: 0 nbd_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nbd_index_mutex pool_lock#2 irq_context: 0 set->srcu irq_context: 0 (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (work_completion)(&(&hctx->run_work)->work) irq_context: 0 &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex set->srcu irq_context: 0 &q->sysfs_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &q->sysfs_lock fill_pool_map-wait-type-override pool_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &q->sysfs_lock batched_entropy_u8.lock irq_context: 0 &q->sysfs_lock kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &q->sysfs_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 zram_index_mutex irq_context: 0 zram_index_mutex fs_reclaim irq_context: 0 zram_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex pool_lock#2 irq_context: 0 zram_index_mutex &c->lock irq_context: 0 zram_index_mutex &pcp->lock &zone->lock 
irq_context: 0 zram_index_mutex &zone->lock irq_context: 0 zram_index_mutex &____s->seqcount irq_context: 0 zram_index_mutex blk_queue_ida.xa_lock irq_context: 0 zram_index_mutex &obj_hash[i].lock irq_context: 0 zram_index_mutex pcpu_alloc_mutex irq_context: 0 zram_index_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 zram_index_mutex bio_slab_lock irq_context: 0 zram_index_mutex percpu_counters_lock irq_context: 0 zram_index_mutex mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex &s->s_inode_list_lock irq_context: 0 zram_index_mutex &xa->xa_lock#9 irq_context: 0 zram_index_mutex lock irq_context: 0 zram_index_mutex lock &q->queue_lock irq_context: 0 zram_index_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 zram_index_mutex &q->queue_lock irq_context: 0 zram_index_mutex &q->queue_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->queue_lock pcpu_lock irq_context: 0 zram_index_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 zram_index_mutex &q->queue_lock percpu_counters_lock irq_context: 0 zram_index_mutex &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 zram_index_mutex &x->wait#9 irq_context: 0 zram_index_mutex &bdev->bd_size_lock irq_context: 0 zram_index_mutex &k->list_lock irq_context: 0 zram_index_mutex gdp_mutex irq_context: 0 zram_index_mutex gdp_mutex &k->list_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex bus_type_sem irq_context: 0 zram_index_mutex sysfs_symlink_target_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &dev->power.lock irq_context: 0 zram_index_mutex dpm_list_mtx irq_context: 0 zram_index_mutex req_lock irq_context: 0 zram_index_mutex &p->pi_lock irq_context: 0 zram_index_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 zram_index_mutex &x->wait#11 irq_context: 0 zram_index_mutex &rq->__lock irq_context: 0 zram_index_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 zram_index_mutex subsys mutex#37 irq_context: 0 zram_index_mutex subsys mutex#37 &k->k_lock irq_context: 0 zram_index_mutex dev_hotplug_mutex irq_context: 0 zram_index_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex 
&sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex percpu_ref_switch_lock irq_context: 0 zram_index_mutex uevent_sock_mutex irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex running_helpers_waitq.lock irq_context: 0 zram_index_mutex subsys mutex#38 irq_context: 0 zram_index_mutex subsys mutex#38 &k->k_lock irq_context: 0 zram_index_mutex cgwb_lock irq_context: 0 zram_index_mutex pin_fs_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 
&sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex bdi_lock irq_context: 0 zram_index_mutex inode_hash_lock irq_context: 0 zram_index_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 zram_index_mutex &cfs_rq->removed.lock irq_context: 0 zram_index_mutex (console_sem).lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner_lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner irq_context: 0 zram_index_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 zram_index_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 subsys mutex#39 irq_context: 0 subsys mutex#39 &k->k_lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 configfs_dirent_lock irq_context: 0 &q->sysfs_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 &lock irq_context: 0 &lock nullb_indexes.xa_lock irq_context: 0 &q->sysfs_dir_lock rcu_read_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &ret->b_uptodate_lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 ctx_list.lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 nfc_index_ida.xa_lock irq_context: 0 nfc_devlist_mutex irq_context: 0 nfc_devlist_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex irq_context: 0 nfc_devlist_mutex gdp_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex gdp_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex gdp_mutex lock irq_context: 0 nfc_devlist_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex lock irq_context: 0 nfc_devlist_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem 
irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex bus_type_sem irq_context: 0 nfc_devlist_mutex sysfs_symlink_target_lock irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex &c->lock irq_context: 0 nfc_devlist_mutex &____s->seqcount irq_context: 0 nfc_devlist_mutex &dev->power.lock irq_context: 0 nfc_devlist_mutex dpm_list_mtx irq_context: 0 nfc_devlist_mutex uevent_sock_mutex irq_context: 0 nfc_devlist_mutex &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfc_devlist_mutex running_helpers_waitq.lock irq_context: 0 nfc_devlist_mutex &rq->__lock irq_context: 0 nfc_devlist_mutex subsys mutex#40 irq_context: 0 nfc_devlist_mutex subsys mutex#40 &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex &k->list_lock irq_context: 0 &dev->mutex rfkill_global_mutex lock irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex bus_type_sem irq_context: 0 &dev->mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rfkill_global_mutex &zone->lock irq_context: 0 &dev->mutex rfkill_global_mutex &____s->seqcount irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &c->lock irq_context: 0 &dev->mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &dev->mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &dev->mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &dev->mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rfkill_global_mutex &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex subsys mutex#41 irq_context: 0 &dev->mutex rfkill_global_mutex subsys 
mutex#41 &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex leds_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex.wait_lock irq_context: 0 &dev->mutex &p->pi_lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rfkill->lock irq_context: 0 &pool->lock &base->lock irq_context: 0 &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dma_heap_minors.xa_lock irq_context: 0 subsys mutex#42 irq_context: 0 subsys mutex#42 &k->k_lock irq_context: 0 heap_list_lock irq_context: 0 tasklist_lock &n->list_lock irq_context: 0 &dev->mutex host_index_ida.xa_lock irq_context: 0 &dev->mutex kthread_create_lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &x->wait irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 
irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#9 irq_context: 0 &dev->mutex wq_pool_mutex irq_context: 0 &dev->mutex wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex &md->mutex &c->lock irq_context: 0 &dev->mutex &md->mutex &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &zone->lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex scsi_sense_cache_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex kfence_freelist_lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &c->lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &n->list_lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pool_lock#2 irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex rcu_node_0 irq_context: 0 &dev->mutex rcu_read_lock rcu_node_0 irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex subsys mutex#43 irq_context: 0 &dev->mutex subsys mutex#44 irq_context: 0 &dev->mutex subsys mutex#44 &k->k_lock irq_context: 0 &dev->mutex attribute_container_mutex irq_context: 0 &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &virtscsi_vq->vq_lock irq_context: 0 &dev->mutex &shost->scan_mutex irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &shost->scan_mutex pool_lock#2 irq_context: 0 &dev->mutex &shost->scan_mutex shost->host_lock irq_context: 0 &dev->mutex async_scan_lock irq_context: 0 &dev->mutex async_scan_lock &x->wait#15 irq_context: 0 &dev->mutex async_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock irq_context: 
0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex shost->host_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex attribute_container_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &xa->xa_lock#8 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock &c->lock irq_context: hardirq &virtscsi_vq->vq_lock irq_context: softirq &x->wait#16 irq_context: softirq &x->wait#16 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&q->timeout) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&q->timeout_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&q->requeue_work)->work) 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 major_names_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock irq_context: 0 major_names_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 major_names_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &xa->xa_lock#8 irq_context: 0 major_names_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->requeue_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->event_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->inquiry_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &obj_hash[i].lock pool_lock irq_context: 0 subsys mutex#45 irq_context: 0 subsys mutex#45 &k->list_lock irq_context: 0 subsys mutex#45 &k->k_lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#16 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 nvmf_hosts_mutex irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &tags->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 subsys mutex#46 irq_context: 0 subsys mutex#46 &k->k_lock irq_context: 0 nvmf_transports_rwsem irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 gdp_mutex &c->lock irq_context: 0 gdp_mutex &____s->seqcount irq_context: 0 subsys mutex#47 irq_context: 0 subsys mutex#47 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 irq_context: 0 nvmet_config_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 subsys mutex#48 
irq_context: 0 subsys mutex#48 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &dentry->d_lock irq_context: 0 
&sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 irq_context: 0 
&sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6/2 irq_context: 0 backend_mutex irq_context: 0 scsi_mib_index_lock irq_context: 0 hba_lock irq_context: 0 device_mutex irq_context: 0 device_mutex fs_reclaim irq_context: 0 device_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 device_mutex pool_lock#2 irq_context: softirq rcu_callback pcpu_lock irq_context: softirq rcu_callback percpu_ref_switch_lock irq_context: 0 &hba->device_lock irq_context: 0 rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 mtd_table_mutex irq_context: 0 part_parser_lock irq_context: 0 (kmod_concurrent_max).lock irq_context: 0 &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sig->wait_chldexit irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit 
&p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &prev->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex fs_reclaim irq_context: 0 mtd_table_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex pool_lock#2 irq_context: 0 mtd_table_mutex &x->wait#9 irq_context: 0 mtd_table_mutex &obj_hash[i].lock irq_context: 0 mtd_table_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex irq_context: 0 mtd_table_mutex gdp_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex gdp_mutex pool_lock#2 irq_context: 0 mtd_table_mutex gdp_mutex lock irq_context: 0 mtd_table_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &c->lock irq_context: 0 mtd_table_mutex &pcp->lock 
&zone->lock irq_context: 0 mtd_table_mutex &zone->lock irq_context: 0 mtd_table_mutex &____s->seqcount irq_context: 0 mtd_table_mutex lock irq_context: 0 mtd_table_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex bus_type_sem irq_context: 0 mtd_table_mutex sysfs_symlink_target_lock irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &dev->power.lock irq_context: 0 mtd_table_mutex dpm_list_mtx irq_context: 0 mtd_table_mutex req_lock irq_context: 0 mtd_table_mutex &p->pi_lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex &x->wait#11 irq_context: 0 mtd_table_mutex &rq->__lock irq_context: 0 mtd_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex uevent_sock_mutex irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex running_helpers_waitq.lock irq_context: 0 mtd_table_mutex subsys mutex#49 irq_context: 0 mtd_table_mutex subsys mutex#49 &k->k_lock irq_context: 0 mtd_table_mutex devtree_lock irq_context: 0 mtd_table_mutex nvmem_ida.xa_lock irq_context: 0 mtd_table_mutex nvmem_cell_mutex irq_context: 0 mtd_table_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 mtd_table_mutex &k->k_lock irq_context: 0 mtd_table_mutex &n->list_lock irq_context: 0 mtd_table_mutex &n->list_lock &c->lock irq_context: 0 mtd_table_mutex &dev->mutex &dev->power.lock irq_context: 0 mtd_table_mutex &dev->mutex &k->list_lock irq_context: 0 mtd_table_mutex &dev->mutex &k->k_lock irq_context: 0 mtd_table_mutex subsys mutex#50 irq_context: 0 mtd_table_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex (console_sem).lock irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner_lock irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner irq_context: 0 mtd_table_mutex 
console_lock console_srcu console_owner &port_lock_key irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock rcu_node_0 irq_context: 0 mtd_table_mutex &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex rcu_read_lock pool_lock#2 irq_context: 0 mtd_table_mutex pcpu_alloc_mutex irq_context: 0 mtd_table_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex cpu_hotplug_lock irq_context: 0 mtd_table_mutex batched_entropy_u32.lock irq_context: 0 mtd_table_mutex mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex blk_queue_ida.xa_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &n->list_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock &xa->xa_lock#8 irq_context: 0 mtd_table_mutex &set->tag_list_lock irq_context: 0 mtd_table_mutex bio_slab_lock irq_context: 0 mtd_table_mutex percpu_counters_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_lock_key#3 irq_context: 0 mtd_table_mutex &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &xa->xa_lock#9 irq_context: 0 mtd_table_mutex lock &q->queue_lock irq_context: 0 mtd_table_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex set->srcu irq_context: 0 mtd_table_mutex percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->queue_lock irq_context: 0 mtd_table_mutex &q->queue_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->queue_lock pcpu_lock irq_context: 0 mtd_table_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->queue_lock percpu_counters_lock irq_context: 0 mtd_table_mutex &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 mtd_table_mutex &bdev->bd_size_lock irq_context: 0 mtd_table_mutex elv_list_lock irq_context: 0 mtd_table_mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 mtd_table_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 mtd_table_mutex &q->debugfs_mutex irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex subsys mutex#37 irq_context: 0 mtd_table_mutex subsys mutex#37 &k->k_lock irq_context: 0 mtd_table_mutex dev_hotplug_mutex irq_context: 0 mtd_table_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock 
fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock rcu_read_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 mtd_table_mutex 
&q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex set->srcu irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex 
&sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 mtd_table_mutex subsys mutex#38 irq_context: 0 mtd_table_mutex subsys mutex#38 &k->k_lock irq_context: 0 mtd_table_mutex cgwb_lock irq_context: 0 mtd_table_mutex bdi_lock irq_context: 0 mtd_table_mutex inode_hash_lock irq_context: 0 mtd_table_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 mtd_table_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex stack_depot_init_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex kthread_create_lock irq_context: 0 rtnl_mutex &p->pi_lock irq_context: 0 rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &x->wait irq_context: 0 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex crngs.lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 irq_context: 0 rtnl_mutex net_rwsem irq_context: 0 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &x->wait#9 irq_context: 0 rtnl_mutex &k->list_lock irq_context: 0 rtnl_mutex gdp_mutex irq_context: 0 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex bus_type_sem irq_context: 0 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &dev->power.lock irq_context: 0 rtnl_mutex dpm_list_mtx irq_context: 0 rtnl_mutex uevent_sock_mutex irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock 
&pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex running_helpers_waitq.lock irq_context: 0 rtnl_mutex subsys mutex#17 irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &dir->lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 rtnl_mutex &n->list_lock irq_context: 0 rtnl_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex dev_hotplug_mutex irq_context: 0 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex dev_base_lock irq_context: 0 rtnl_mutex input_pool.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex batched_entropy_u32.lock irq_context: 0 rtnl_mutex &tbl->lock irq_context: 0 rtnl_mutex sysctl_lock irq_context: 0 rtnl_mutex nl_table_lock irq_context: 0 rtnl_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 rtnl_mutex lweventlist_lock irq_context: 0 rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) irq_context: 0 
(wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) pool_lock#2 irq_context: 0 rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 once_lock irq_context: 0 once_lock crngs.lock irq_context: 0 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) &obj_hash[i].lock irq_context: 0 (inet6addr_validator_chain).rwsem irq_context: 0 (inetaddr_validator_chain).rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 subsys mutex#51 irq_context: 0 subsys mutex#51 &k->k_lock irq_context: 0 gpio_lookup_lock irq_context: 0 mdio_board_lock irq_context: 0 mode_list_lock irq_context: 0 &dev->mutex stack_depot_init_mutex irq_context: 0 &dev->mutex napi_hash_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &zone->lock irq_context: 0 &dev->mutex 
&md->mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex cpu_hotplug_lock &md->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock &irq_desc_lock_class irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex krc.lock irq_context: 0 &dev->mutex rtnl_mutex irq_context: 0 &dev->mutex rtnl_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex pool_lock#2 irq_context: hardirq &irq_desc_lock_class tmp_mask_lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex &obj_hash[i].lock irq_context: hardirq &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 &dev->mutex rtnl_mutex net_rwsem irq_context: 0 &dev->mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &dev->mutex rtnl_mutex &x->wait#9 irq_context: 0 &dev->mutex rtnl_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &dev->mutex rtnl_mutex lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex bus_type_sem irq_context: 0 &dev->mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dpm_list_mtx irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex 
uevent_sock_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rtnl_mutex &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex &dir->lock#2 irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dev_base_lock irq_context: 0 &dev->mutex rtnl_mutex input_pool.lock irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex rtnl_mutex &tbl->lock irq_context: 0 &dev->mutex rtnl_mutex sysctl_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_wait.lock irq_context: hardirq|softirq &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 &dev->mutex lweventlist_lock irq_context: 0 &dev->mutex lweventlist_lock pool_lock#2 irq_context: 0 &dev->mutex lweventlist_lock &dir->lock#2 irq_context: 0 &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&vi->config_work) irq_context: 0 l3mdev_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &shost->scan_mutex &x->wait#15 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#43 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex sysfs_symlink_target_lock 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#43 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bio_slab_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock 
pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sd_index_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#52 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#52 &rq->__lock irq_context: 0 subsys mutex#53 irq_context: 0 subsys mutex#53 &k->k_lock irq_context: 0 compressor_list_lock irq_context: 0 compressor_list_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#52 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&dev->mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock sg_index_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock sg_index_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex 
subsys mutex#45 &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 subsys mutex#54 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 subsys mutex#54 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex 
subsys mutex#45 console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bsg_minor_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#55 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#55 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_scan_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &meta->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex elv_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex req_lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex 
&q->debugfs_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#38 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#38 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex cgwb_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdev_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &mapping->private_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dd->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &folio_wait_table[i] irq_context: 0 (wq_completion)kblockd irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq &ret->b_uptodate_lock irq_context: softirq &folio_wait_table[i] irq_context: softirq &folio_wait_table[i] &p->pi_lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex 
&x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#37 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex purge_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &lruvec->lru_lock irq_context: 0 hwsim_radio_lock irq_context: 0 subsys mutex#56 irq_context: 0 subsys mutex#56 &k->k_lock irq_context: 0 deferred_probe_mutex irq_context: 0 rtnl_mutex param_lock irq_context: 0 rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 rtnl_mutex (console_sem).lock irq_context: 0 rtnl_mutex console_owner_lock irq_context: 0 rtnl_mutex console_owner irq_context: 0 rtnl_mutex &rdev->wiphy.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock 
pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx running_helpers_waitq.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#57 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#57 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 rtnl_mutex &base->lock irq_context: 0 rtnl_mutex 
&base->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex irq_context: 0 rfkill_global_mutex fs_reclaim irq_context: 0 rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rfkill_global_mutex pool_lock#2 irq_context: 0 rfkill_global_mutex &k->list_lock irq_context: 0 rfkill_global_mutex lock irq_context: 0 rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rfkill_global_mutex bus_type_sem irq_context: 0 rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 rfkill_global_mutex &c->lock irq_context: 0 rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 rfkill_global_mutex &zone->lock irq_context: 0 rfkill_global_mutex &____s->seqcount irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &dev->power.lock irq_context: 0 rfkill_global_mutex dpm_list_mtx irq_context: 0 rfkill_global_mutex &rfkill->lock irq_context: 0 rfkill_global_mutex uevent_sock_mutex irq_context: 0 rfkill_global_mutex &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 rfkill_global_mutex &k->k_lock irq_context: 0 rfkill_global_mutex subsys mutex#41 irq_context: 0 rfkill_global_mutex subsys mutex#41 &k->k_lock irq_context: 0 rfkill_global_mutex triggers_list_lock irq_context: 0 rfkill_global_mutex leds_list_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&rfkill->sync_work) rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &meta->lock irq_context: 0 rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex crngs.lock irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rtnl_mutex &zone->lock irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#58 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#58 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex 
stack_depot_init_mutex irq_context: 0 &dev->mutex rtnl_mutex crngs.lock irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx &sec->lock irq_context: 0 &dev->mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex rtnl_mutex &local->iflist_mtx#2 irq_context: 0 &dev->mutex hwsim_phys_lock irq_context: 0 &dev->mutex nl_table_lock irq_context: 0 &dev->mutex nl_table_wait.lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &dev->mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex hwsim_phys_lock pool_lock#2 irq_context: 0 xdomain_lock irq_context: 0 xdomain_lock fs_reclaim irq_context: 0 xdomain_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 xdomain_lock pool_lock#2 irq_context: 0 ioctl_mutex irq_context: 0 address_handler_list_lock irq_context: 0 card_mutex irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock/1 irq_context: 0 subsys mutex#59 irq_context: 0 subsys mutex#59 &k->k_lock irq_context: 0 dpm_list_mtx &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &obj_hash[i].lock irq_context: 0 &tsk->futex_exit_mutex pool_lock#2 irq_context: 0 &x->wait#18 irq_context: 0 &x->wait#18 &p->pi_lock irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &txlock irq_context: 0 &txlock &list->lock#3 irq_context: 0 &txlock &txwq irq_context: 0 &iocq[i].lock irq_context: 0 &iocq[i].lock &ktiowq[i] irq_context: 0 &txwq irq_context: 0 &txwq &p->pi_lock irq_context: 0 &txwq &p->pi_lock &rq->__lock irq_context: 0 &txwq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh pool_lock#2 irq_context: 0 subsys mutex#60 irq_context: 0 subsys mutex#60 &k->k_lock irq_context: 0 usb_bus_idr_lock irq_context: 0 usb_bus_idr_lock (usb_notifier_list).rwsem irq_context: softirq net/core/link_watch.c:31 irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock irq_context: 0 table_lock &k->list_lock irq_context: 0 table_lock fs_reclaim irq_context: 0 table_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 table_lock &rq->__lock irq_context: 0 table_lock batched_entropy_u8.lock irq_context: 0 table_lock kfence_freelist_lock irq_context: 0 table_lock pool_lock#2 irq_context: 0 table_lock lock irq_context: 0 table_lock lock kernfs_idr_lock irq_context: 0 table_lock &root->kernfs_rwsem irq_context: 0 table_lock 
&root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 table_lock &k->k_lock irq_context: 0 table_lock uevent_sock_mutex irq_context: 0 table_lock &pcp->lock &zone->lock irq_context: 0 table_lock &zone->lock irq_context: 0 table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 table_lock &____s->seqcount irq_context: 0 table_lock rcu_read_lock pool_lock#2 irq_context: 0 table_lock &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock running_helpers_waitq.lock irq_context: 0 table_lock (console_sem).lock irq_context: 0 table_lock console_lock console_srcu console_owner_lock irq_context: 0 table_lock console_lock console_srcu console_owner irq_context: 0 table_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 table_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 table_lock &c->lock irq_context: 0 table_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_node_0 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 table_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) kfence_freelist_lock 
irq_context: 0 &dev->mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pin_fs_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#9 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem bus_type_sem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex 
(usb_notifier_list).rwsem &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &dev->power.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem dpm_list_mtx irq_context: 0 &dev->mutex (usb_notifier_list).rwsem req_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#11 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem uevent_sock_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#60 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#60 &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem mon_lock irq_context: 0 &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock irq_context: softirq &bh->lock irq_context: softirq lock#6 irq_context: softirq lock#6 kcov_remote_lock irq_context: softirq &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock input_pool.lock irq_context: 
0 &dev->mutex usb_bus_idr_lock &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock req_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#11 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->k_lock irq_context: softirq drivers/block/floppy.c:640 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex pool_lock#2 irq_context: 0 (wq_completion)events 
(work_completion)(&blkg->free_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &xa->xa_lock#8 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) percpu_ref_switch_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex set_config_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex pool_lock#2 irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &new_driver->dynids.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock irq_context: 0 &dev->mutex 
usb_bus_idr_lock &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class irq_resend_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &c->lock 
irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&motor_off_timer[drive]) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (work_completion)(&td->dispatch_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex 
&hub->status_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dpm_list_mtx 
irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx pm_qos_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex component_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &lock->wait_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex subsys mutex#61 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock 
&lock->wait_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#19 &p->pi_lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex (&timer.timer) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock 
hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)usb_hub_wq irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex usb_bus_idr_lock subsys mutex#61 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &hub->irq_urb_lock irq_context: 0 &dev->mutex usb_bus_idr_lock (&hub->irq_urb_retry) irq_context: 0 &dev->mutex usb_bus_idr_lock &base->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_unlink_lock irq_context: softirq usb_kill_urb_queue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock (work_completion)(&hub->tt.clear_work) irq_context: 0 &dev->mutex usb_bus_idr_lock &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.wait_queue irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock pool_lock#2 irq_context: softirq rcu_callback percpu_ref_switch_waitq.lock irq_context: softirq rcu_callback rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback percpu_counters_lock irq_context: softirq rcu_callback pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock 
&pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex udc_lock irq_context: 0 &dev->mutex subsys mutex#62 irq_context: 0 &dev->mutex subsys mutex#62 &k->k_lock irq_context: 0 &dev->mutex gadget_id_numbers.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events kernfs_notify_work irq_context: 0 (wq_completion)events kernfs_notify_work kernfs_notify_lock irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem irq_context: 0 &dev->mutex subsys mutex#63 irq_context: 0 &dev->mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex gdp_mutex &zone->lock irq_context: 0 func_lock irq_context: 0 g_tf_lock irq_context: 0 fs_reclaim &cfs_rq->removed.lock irq_context: 0 fs_reclaim &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &vhci_hcd->vhci->lock irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &vhci_hcd->vhci->lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &meta->lock irq_context: 0 &dev->mutex usb_bus_idr_lock quarantine_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &meta->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem batched_entropy_u8.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &type->i_mutex_dir_key#2 &c->lock irq_context: 0 &type->i_mutex_dir_key#2 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#2 &zone->lock irq_context: 0 &type->i_mutex_dir_key#2 &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_node_0 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock rcu_node_0 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq (&vblank->disable_timer) irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock rcu_read_lock 
&pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) pool_lock#2 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) fs_reclaim irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &vhci_hcd->vhci->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &obj_hash[i].lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &bh->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &p->pi_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &x->wait#19 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &base->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &pool->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &rq->__lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) (&timer.timer) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &c->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &zone->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &____s->seqcount irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &hub->irq_urb_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) (&hub->irq_urb_retry) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_urb_unlink_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) device_state_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_urb_list_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq (&q->timeout) irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &cfs_rq->removed.lock 
irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) usb_kill_urb_queue.lock irq_context: softirq usb_kill_urb_queue.lock &p->pi_lock irq_context: softirq usb_kill_urb_queue.lock &p->pi_lock &rq->__lock irq_context: softirq usb_kill_urb_queue.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) (work_completion)(&hub->tt.clear_work) irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex lock kernfs_idr_lock 
&____s->seqcount irq_context: 0 i8042_lock irq_context: 0 &dev->mutex i8042_lock irq_context: 0 &dev->mutex i8042_lock (console_sem).lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: hardirq i8042_lock &x->wait#20 irq_context: 0 &dev->mutex &x->wait#20 irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_resend_lock irq_context: 0 &dev->mutex &desc->request_mutex proc_subdir_lock irq_context: 0 &dev->mutex &desc->request_mutex &ent->pde_unload_lock irq_context: 0 &dev->mutex &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex &desc->request_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &desc->request_mutex pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock irq_context: 0 &dev->mutex serio_event_lock pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long irq_context: 0 (wq_completion)events_long serio_event_work irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex serio_event_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long 
serio_event_work serio_mutex &device->physical_node_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex semaphore->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex subsys mutex#64 irq_context: 0 reading_mutex &x->wait#12 irq_context: 0 &dev->mutex serio_event_lock &c->lock irq_context: 0 &dev->mutex serio_event_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex serio_event_lock &zone->lock irq_context: 0 &dev->mutex serio_event_lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 input_ida.xa_lock irq_context: 0 input_ida.xa_lock pool_lock#2 irq_context: 0 gdp_mutex &pcp->lock &zone->lock irq_context: 0 
gdp_mutex &zone->lock irq_context: 0 subsys mutex#30 irq_context: 0 subsys mutex#30 &k->k_lock irq_context: 0 misc_mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 input_mutex input_ida.xa_lock irq_context: 0 input_mutex fs_reclaim irq_context: 0 input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 input_mutex pool_lock#2 irq_context: 0 input_mutex &x->wait#9 irq_context: 0 input_mutex &obj_hash[i].lock irq_context: 0 input_mutex &dev->mutex#2 irq_context: 0 input_mutex chrdevs_lock irq_context: 0 input_mutex &k->list_lock irq_context: 0 input_mutex lock irq_context: 0 input_mutex lock kernfs_idr_lock irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 input_mutex bus_type_sem irq_context: 0 input_mutex sysfs_symlink_target_lock irq_context: 0 input_mutex &c->lock irq_context: 0 input_mutex &____s->seqcount irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &dev->power.lock irq_context: 0 input_mutex dpm_list_mtx irq_context: 0 input_mutex req_lock irq_context: 0 input_mutex &p->pi_lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex &x->wait#11 irq_context: 0 input_mutex uevent_sock_mutex irq_context: 0 input_mutex &obj_hash[i].lock pool_lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex running_helpers_waitq.lock irq_context: 0 input_mutex &k->k_lock irq_context: 0 input_mutex subsys mutex#30 irq_context: 0 input_mutex subsys mutex#30 &k->k_lock irq_context: 0 input_mutex &pcp->lock &zone->lock irq_context: 0 input_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 input_mutex &cfs_rq->removed.lock irq_context: 0 serio_event_lock irq_context: 0 serio_event_lock pool_lock#2 irq_context: 0 serio_event_lock rcu_read_lock &pool->lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &obj_hash[i].lock irq_context: hardirq &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &new_driver->dynids.lock irq_context: 0 &dev->mutex rtc_ida.xa_lock irq_context: 0 &dev->mutex rtc_lock irq_context: 0 &dev->mutex &rtc->ops_lock irq_context: 0 &dev->mutex &rtc->ops_lock rtc_lock irq_context: 0 &dev->mutex chrdevs_lock irq_context: 0 &dev->mutex req_lock irq_context: 0 &dev->mutex &x->wait#11 irq_context: 0 &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 
&dev->mutex subsys mutex#27 irq_context: 0 &dev->mutex subsys mutex#27 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 &c->lock irq_context: 0 &dev->mutex subsys mutex#27 &pcp->lock &zone->lock irq_context: 0 &dev->mutex subsys mutex#27 &zone->lock irq_context: 0 &dev->mutex subsys mutex#27 &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#27 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &x->wait#9 irq_context: 0 &dev->mutex subsys mutex#27 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 platform_devid_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 lock irq_context: 0 &dev->mutex subsys mutex#27 lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 bus_type_sem irq_context: 0 &dev->mutex subsys mutex#27 sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 dpm_list_mtx irq_context: 0 &dev->mutex subsys mutex#27 &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex 
subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#3 irq_context: 0 &dev->mutex subsys mutex#27 &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 wakeup_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 events_lock irq_context: 0 &dev->mutex subsys mutex#27 rtcdev_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex (&timer.timer) irq_context: hardirq &serio->lock &ps2dev->wait irq_context: hardirq &serio->lock &ps2dev->wait &p->pi_lock irq_context: hardirq &serio->lock &ps2dev->wait &p->pi_lock &rq->__lock irq_context: hardirq &serio->lock &ps2dev->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#65 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#65 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access leds_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim irq_context: 0 (wq_completion)events_long 
serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &trig->leddev_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &dev->event_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex chrdevs_lock irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_devices_poll_wait.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &c->lock irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex (&timer.timer) irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &serio->lock &dev->power.lock irq_context: hardirq &serio->lock &dev->event_lock#2 irq_context: 0 g_smscore_deviceslock irq_context: 0 g_smscore_deviceslock fs_reclaim irq_context: 0 g_smscore_deviceslock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 g_smscore_deviceslock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex pool_lock#2 irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex chrdevs_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work 
serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &mousedev->mutex/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock pool_lock#2 
irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_devices_poll_wait.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cx231xx_devlist_mutex irq_context: 0 em28xx_devlist_mutex irq_context: 0 pvr2_context_sync_data.lock irq_context: 0 &dev->mutex core_lock irq_context: 0 &dev->mutex core_lock fs_reclaim irq_context: 0 &dev->mutex core_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex core_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex core_lock &zone->lock irq_context: 0 &dev->mutex core_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex core_lock &____s->seqcount irq_context: 0 &dev->mutex core_lock pool_lock#2 irq_context: 0 &dev->mutex core_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex core_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem i2c_dev_list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#9 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem chrdevs_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem bus_type_sem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &c->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &dev->power.lock irq_context: 0 &dev->mutex 
&(&priv->bus_notifier)->rwsem dpm_list_mtx irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem req_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#11 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem uevent_sock_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->k_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#66 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#66 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#67 irq_context: 0 &dev->mutex core_lock &k->list_lock irq_context: 0 &dev->mutex core_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock irq_context: 0 &dev->mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex (kmod_concurrent_max).lock irq_context: 0 &dev->mutex &x->wait#17 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex lock irq_context: 0 &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &dev->mutex 
deferred_probe_mutex irq_context: 0 &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex frontend_mutex irq_context: 0 &dev->mutex frontend_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex (console_sem).lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex &rq->__lock irq_context: 0 &dev->mutex frontend_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex frontend_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex frontend_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &xa->xa_lock#10 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &zone->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex 
frontend_mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#11 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#68 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#68 &k->k_lock irq_context: 0 &dev->mutex init_mm.page_table_lock irq_context: 0 &dev->mutex &dmxdev->lock irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock 
&zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#10 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#10 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#11 irq_context: 0 &dev->mutex dvbdev_register_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#68 irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#68 &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &dvbdemux->mutex irq_context: 0 &dev->mutex media_devnode_lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex subsys mutex#69 irq_context: 0 &dev->mutex videodev_lock irq_context: 0 
&dev->mutex subsys mutex#70 irq_context: 0 &dev->mutex subsys mutex#70 &k->k_lock irq_context: 0 &dev->mutex &xa->xa_lock#10 irq_context: 0 &dev->mutex &mdev->graph_mutex irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex vimc_sensor:393:(&vsensor->hdl)->_lock irq_context: 0 &dev->mutex &v4l2_dev->lock irq_context: 0 &dev->mutex vimc_debayer:578:(&vdebayer->hdl)->_lock irq_context: 0 &dev->mutex vimc_lens:61:(&vlens->hdl)->_lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex tk_core.seq.seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1618:(hdl_fb)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock 
vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &obj_hash[i].lock irq_context: 0 &adap->kthread_waitq irq_context: 0 &dev->mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->cec_xfers_slock irq_context: 0 &dev->mutex cec_devnode_lock irq_context: 0 &dev->kthread_waitq_cec irq_context: 0 &dev->mutex subsys mutex#71 irq_context: 0 &dev->mutex pin_fs_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 
&sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex &adap->lock irq_context: 0 &dev->mutex &adap->lock tk_core.seq.seqcount irq_context: 0 &dev->mutex &adap->lock &adap->devnode.lock_fhs irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &rq->__lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 &dev->mutex batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u8.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 kfence_freelist_lock irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override 
&obj_hash[i].lock irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 ptp_clocks_map.xa_lock irq_context: 0 subsys mutex#72 irq_context: 0 subsys mutex#72 &k->k_lock irq_context: 0 pers_lock irq_context: 0 _lock irq_context: 0 dm_bufio_clients_lock irq_context: 0 _ps_lock irq_context: 0 _lock#2 irq_context: 0 _lock#3 irq_context: 0 register_lock#2 irq_context: 0 subsys mutex#73 irq_context: 0 subsys mutex#73 &k->k_lock irq_context: 0 bp_lock irq_context: 0 bp_lock irq_context: 0 subsys mutex#74 irq_context: 0 subsys mutex#74 &k->k_lock irq_context: softirq (&dsp_spl_tl) irq_context: softirq (&dsp_spl_tl) dsp_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock tk_core.seq.seqcount irq_context: softirq (&dsp_spl_tl) dsp_lock &obj_hash[i].lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock &obj_hash[i].lock irq_context: 0 leds_list_lock &led_cdev->trigger_lock irq_context: 0 rtnl_mutex lock#7 irq_context: 0 intf_mutex irq_context: 0 iscsi_transport_lock irq_context: 0 subsys mutex#75 irq_context: 0 subsys mutex#75 &k->k_lock irq_context: 0 link_ops_rwsem irq_context: 0 &tx_task->waiting irq_context: 0 subsys mutex#76 irq_context: 0 subsys mutex#76 &k->k_lock irq_context: 0 service_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &fs->lock &dentry->d_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 vsock_register_mutex irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 comedi_drivers_list_lock irq_context: 0 subsys mutex#77 irq_context: 0 subsys mutex#77 &k->k_lock irq_context: 0 snd_ctl_layer_rwsem irq_context: 0 snd_card_mutex irq_context: 0 snd_ioctl_rwsem irq_context: 0 strings irq_context: 0 strings fs_reclaim irq_context: 0 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 strings pool_lock#2 irq_context: 0 register_mutex irq_context: 0 sound_mutex irq_context: 0 sound_mutex fs_reclaim irq_context: 0 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sound_mutex pool_lock#2 irq_context: 0 sound_mutex &k->list_lock irq_context: 0 sound_mutex gdp_mutex irq_context: 0 sound_mutex gdp_mutex &k->list_lock irq_context: 0 sound_mutex lock irq_context: 0 sound_mutex lock kernfs_idr_lock 
irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sound_mutex bus_type_sem irq_context: 0 sound_mutex sysfs_symlink_target_lock irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex &dev->power.lock irq_context: 0 sound_mutex dpm_list_mtx irq_context: 0 sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sound_mutex req_lock irq_context: 0 sound_mutex &p->pi_lock irq_context: 0 sound_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sound_mutex &rq->__lock irq_context: 0 sound_mutex &x->wait#11 irq_context: 0 sound_mutex &obj_hash[i].lock irq_context: 0 sound_mutex &c->lock irq_context: 0 sound_mutex &____s->seqcount irq_context: 0 sound_mutex uevent_sock_mutex irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sound_mutex running_helpers_waitq.lock irq_context: 0 sound_mutex subsys mutex#77 irq_context: 0 sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 register_mutex#2 irq_context: 0 register_mutex#3 irq_context: 0 register_mutex#3 fs_reclaim irq_context: 0 register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex irq_context: 0 register_mutex#3 sound_mutex fs_reclaim irq_context: 0 register_mutex#3 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 sound_mutex pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex gdp_mutex irq_context: 0 register_mutex#3 sound_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex lock irq_context: 0 register_mutex#3 sound_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#3 sound_mutex bus_type_sem irq_context: 0 register_mutex#3 sound_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#3 sound_mutex &c->lock irq_context: 0 register_mutex#3 sound_mutex &____s->seqcount irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &dev->power.lock irq_context: 0 register_mutex#3 sound_mutex dpm_list_mtx irq_context: 0 register_mutex#3 sound_mutex req_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 sound_mutex &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &x->wait#11 irq_context: 0 register_mutex#3 sound_mutex &obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex uevent_sock_mutex irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 
&obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 sound_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#3 sound_mutex &pcp->lock &zone->lock irq_context: 0 register_mutex#3 sound_mutex &zone->lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex subsys mutex#77 irq_context: 0 register_mutex#3 sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 register_mutex#3 &c->lock irq_context: 0 register_mutex#3 &____s->seqcount irq_context: 0 register_mutex#3 clients_lock irq_context: 0 &client->ports_mutex irq_context: 0 &client->ports_mutex &client->ports_lock irq_context: 0 register_mutex#4 irq_context: 0 register_mutex#4 fs_reclaim irq_context: 0 register_mutex#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 sound_oss_mutex pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex sound_loader_lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#9 irq_context: 0 register_mutex#4 sound_oss_mutex &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex lock irq_context: 0 register_mutex#4 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex bus_type_sem irq_context: 0 register_mutex#4 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &c->lock irq_context: 0 register_mutex#4 sound_oss_mutex &____s->seqcount irq_context: 0 register_mutex#4 sound_oss_mutex &dev->power.lock irq_context: 0 register_mutex#4 sound_oss_mutex dpm_list_mtx irq_context: 0 register_mutex#4 sound_oss_mutex req_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#11 irq_context: 0 register_mutex#4 sound_oss_mutex &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 register_mutex#4 sound_oss_mutex uevent_sock_mutex irq_context: 0 register_mutex#4 sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#4 
sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#77 irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#77 &k->k_lock irq_context: 0 register_mutex#4 fs_reclaim &rq->__lock irq_context: 0 register_mutex#4 &rq->__lock irq_context: 0 register_mutex#4 batched_entropy_u8.lock irq_context: 0 register_mutex#4 kfence_freelist_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex &cfs_rq->removed.lock irq_context: 0 clients_lock irq_context: 0 &client->ports_lock irq_context: 0 &grp->list_mutex/1 irq_context: 0 &grp->list_mutex#2 irq_context: 0 &grp->list_mutex#2 &grp->list_lock irq_context: 0 &grp->list_mutex/1 clients_lock irq_context: 0 &grp->list_mutex/1 &client->ports_lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events async_lookup_work irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events async_lookup_work pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work clients_lock irq_context: 0 (wq_completion)events async_lookup_work &client->ports_lock irq_context: 0 (wq_completion)events async_lookup_work snd_card_mutex irq_context: 0 (wq_completion)events async_lookup_work (kmod_concurrent_max).lock irq_context: 0 (wq_completion)events async_lookup_work &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events async_lookup_work &x->wait#17 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &grp->list_mutex/1 register_lock#3 irq_context: 0 &grp->list_mutex/1 fs_reclaim irq_context: 0 &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &grp->list_mutex/1 pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work running_helpers_waitq.lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)events async_lookup_work autoload_work irq_context: 0 (wq_completion)events async_lookup_work &x->wait#10 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events autoload_work irq_context: 0 (wq_completion)events autoload_work &k->list_lock irq_context: 0 (wq_completion)events autoload_work &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex snd_card_mutex irq_context: 0 &dev->mutex &entry->access irq_context: 0 &dev->mutex info_mutex irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex info_mutex fs_reclaim irq_context: 0 &dev->mutex info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex info_mutex pool_lock#2 irq_context: 0 &dev->mutex info_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex &card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem &card->ctl_files_rwlock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#77 irq_context: 0 &dev->mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex 
sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &x->wait#11 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#77 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 register_mutex irq_context: 0 &dev->mutex register_mutex#2 &c->lock irq_context: 0 &dev->mutex register_mutex#2 &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 &zone->lock irq_context: 0 &dev->mutex register_mutex#2 &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem 
irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#77 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 strings irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 strings pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 &entry->access irq_context: 0 &dev->mutex register_mutex#2 info_mutex irq_context: 0 &dev->mutex sound_mutex irq_context: 0 &dev->mutex sound_mutex fs_reclaim irq_context: 0 &dev->mutex sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_mutex &k->list_lock irq_context: 0 &dev->mutex sound_mutex lock irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_mutex bus_type_sem irq_context: 0 &dev->mutex sound_mutex sysfs_symlink_target_lock 
irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &c->lock irq_context: 0 &dev->mutex sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_mutex &zone->lock irq_context: 0 &dev->mutex sound_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_mutex req_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex &rq->__lock irq_context: 0 &dev->mutex sound_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_mutex &k->k_lock irq_context: 0 &dev->mutex sound_mutex subsys mutex#77 irq_context: 0 &dev->mutex sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem snd_ctl_led_mutex irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &x->wait#9 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->list_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem bus_type_sem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &dev->power.lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem dpm_list_mtx irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem 
snd_ctl_layer_rwsem &____s->seqcount irq_context: 0 &dev->mutex info_mutex &c->lock irq_context: 0 &dev->mutex info_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex info_mutex &zone->lock irq_context: 0 &dev->mutex info_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex lock irq_context: 0 &dev->mutex sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_oss_mutex &c->lock irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_oss_mutex req_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#77 irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex strings irq_context: 0 &dev->mutex strings fs_reclaim irq_context: 0 &dev->mutex strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex strings pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex 
sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex info_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex info_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#5 irq_context: 0 &dev->mutex register_mutex#3 irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#3 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#3 clients_lock irq_context: 0 &dev->mutex clients_lock irq_context: 0 &dev->mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 irq_context: 0 &dev->mutex &grp->list_mutex/1 clients_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &client->ports_lock irq_context: 0 &dev->mutex &client->ports_mutex irq_context: 0 &dev->mutex &client->ports_mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 register_lock#3 irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &grp->list_mutex/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#3 &c->lock irq_context: 0 &dev->mutex register_mutex#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#3 &zone->lock irq_context: 0 &dev->mutex register_mutex#3 &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 llc_sap_list_lock irq_context: 0 llc_sap_list_lock pool_lock#2 irq_context: 0 act_id_mutex irq_context: 0 act_id_mutex fs_reclaim irq_context: 0 act_id_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 act_id_mutex pool_lock#2 irq_context: 0 act_mod_lock irq_context: 0 act_id_mutex &c->lock irq_context: 0 act_id_mutex &pcp->lock &zone->lock irq_context: 0 act_id_mutex &zone->lock irq_context: 0 act_id_mutex &____s->seqcount irq_context: 0 ife_mod_lock irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 cls_mod_lock irq_context: 0 ematch_mod_lock irq_context: 0 sock_diag_table_mutex irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 nfnl_subsys_acct irq_context: 0 nfnl_subsys_queue irq_context: 0 nfnl_subsys_ulog irq_context: 0 nf_log_mutex irq_context: 0 nfnl_subsys_osf irq_context: 0 nf_sockopt_mutex irq_context: 0 nfnl_subsys_ctnetlink irq_context: 0 nfnl_subsys_ctnetlink_exp irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 nfnl_subsys_cttimeout irq_context: 0 nfnl_subsys_cthelper irq_context: 0 nf_ct_helper_mutex irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 nf_conntrack_expect_lock irq_context: 0 net_rwsem irq_context: 0 nf_conntrack_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 nf_ct_nat_helpers_mutex irq_context: 0 nfnl_subsys_nftables irq_context: 0 nfnl_subsys_nftcompat irq_context: 0 masq_mutex irq_context: 0 masq_mutex pernet_ops_rwsem irq_context: 0 masq_mutex pernet_ops_rwsem rtnl_mutex irq_context: 0 masq_mutex (inetaddr_chain).rwsem irq_context: 0 masq_mutex 
inet6addr_chain.lock irq_context: 0 &xt[i].mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 subsys mutex#78 irq_context: 0 subsys mutex#78 &k->k_lock irq_context: 0 nfnl_subsys_ipset irq_context: 0 ip_set_type_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 ip_vs_sched_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 ip_vs_pe_mutex irq_context: 0 tunnel4_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 xfrm4_protocol_mutex irq_context: 0 &xt[i].mutex fs_reclaim irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xt[i].mutex pool_lock#2 irq_context: 0 inet_diag_table_mutex irq_context: 0 xfrm_km_lock irq_context: 0 xfrm_translator_lock irq_context: 0 xfrm6_protocol_mutex irq_context: 0 tunnel6_mutex irq_context: 0 xfrm_if_cb_lock irq_context: 0 inetsw6_lock irq_context: 0 &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 pernet_ops_rwsem 
rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 (crypto_chain).rwsem fs_reclaim irq_context: 0 (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (crypto_chain).rwsem &c->lock irq_context: 0 (crypto_chain).rwsem &pcp->lock &zone->lock irq_context: 0 (crypto_chain).rwsem &zone->lock irq_context: 0 (crypto_chain).rwsem &____s->seqcount irq_context: 0 (crypto_chain).rwsem pool_lock#2 irq_context: 0 (crypto_chain).rwsem kthread_create_lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem &x->wait irq_context: 0 (crypto_chain).rwsem &rq->__lock irq_context: 0 (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 &x->wait#21 irq_context: 0 &x->wait#21 &p->pi_lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->alloc_lock &x->wait irq_context: 0 (crypto_chain).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex quarantine_lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 stp_proto_mutex irq_context: 0 stp_proto_mutex llc_sap_list_lock irq_context: 0 stp_proto_mutex llc_sap_list_lock pool_lock#2 irq_context: 0 switchdev_notif_chain.lock irq_context: 0 (switchdev_blocking_notif_chain).rwsem irq_context: 0 br_ioctl_mutex irq_context: 0 nf_ct_proto_mutex irq_context: 0 ebt_mutex irq_context: 0 ebt_mutex fs_reclaim irq_context: 0 ebt_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ebt_mutex pool_lock#2 irq_context: 0 dsa_tag_drivers_lock irq_context: 0 rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 protocol_list_lock irq_context: 0 linkfail_lock irq_context: 0 rtnl_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fs_reclaim pool_lock#2 irq_context: 0 rose_neigh_list_lock irq_context: 0 proto_tab_lock#2 irq_context: 0 bt_proto_lock irq_context: 0 bt_proto_lock pool_lock#2 irq_context: 0 bt_proto_lock &dir->lock irq_context: 0 bt_proto_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock chan_list_lock irq_context: 0 bt_proto_lock l2cap_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP chan_list_lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 rfcomm_wq.lock irq_context: 0 rfcomm_mutex irq_context: 0 auth_domain_lock irq_context: 0 
registered_mechs_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 atm_dev_notify_chain.lock irq_context: 0 genl_mutex irq_context: 0 proto_tab_lock#3 irq_context: 0 vlan_ioctl_mutex irq_context: 0 pernet_ops_rwsem (console_sem).lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner_lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rds_info_lock irq_context: 0 rds_trans_sem irq_context: 0 rds_trans_sem (console_sem).lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner_lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner irq_context: 0 rds_trans_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 rds_trans_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &id_priv->lock irq_context: 0 lock#7 irq_context: 0 lock#7 fs_reclaim irq_context: 0 lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 lock#7 pool_lock#2 irq_context: 0 lock#7 &xa->xa_lock#12 irq_context: 0 lock#7 &xa->xa_lock#12 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 
pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 smc_wr_rx_hash_lock irq_context: 0 v9fs_trans_lock irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 &x->wait#17 &p->pi_lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 lowpan_nhc_lock irq_context: 0 ovs_mutex irq_context: 0 pernet_ops_rwsem once_lock irq_context: 0 pernet_ops_rwsem once_lock crngs.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &zone->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem 
nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rcu_read_lock quarantine_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#79 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock &obj_hash[i].lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: hardirq &x->wait#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&gc_work->dwork)->timer irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kernfs_idr_lock &obj_hash[i].lock irq_context: 0 kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_callback &base->lock irq_context: softirq rcu_callback &base->lock &obj_hash[i].lock irq_context: softirq &(&ipvs->defense_work)->timer irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->dropentry_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->droppacket_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->securetcp_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq lib/debugobjects.c:101 irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work irq_context: 0 &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) pool_lock#2 irq_context: softirq (&net->can.stattimer) irq_context: softirq (&net->can.stattimer) &obj_hash[i].lock irq_context: softirq (&net->can.stattimer) &base->lock irq_context: softirq (&net->can.stattimer) &base->lock &obj_hash[i].lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock purge_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
drain_vmap_work vmap_purge_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &base->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &base->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &meta->lock irq_context: 0 &root->kernfs_rwsem rcu_node_0 irq_context: 0 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
&rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &c->lock irq_context: 0 cb_lock genl_mutex &____s->seqcount irq_context: 0 lock map_idr_lock irq_context: 0 lock map_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 purge_vmap_area_lock irq_context: 0 lock prog_idr_lock irq_context: 0 lock prog_idr_lock pool_lock#2 irq_context: 0 bpf_lock irq_context: 0 rcu_read_lock_trace fs_reclaim irq_context: 0 rcu_read_lock_trace fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rcu_read_lock_trace &c->lock irq_context: 0 rcu_read_lock_trace &pcp->lock &zone->lock irq_context: 0 rcu_read_lock_trace &zone->lock irq_context: 0 rcu_read_lock_trace &____s->seqcount irq_context: 0 rcu_read_lock_trace pool_lock#2 irq_context: 0 rcu_read_lock_trace &obj_hash[i].lock irq_context: 0 rcu_read_lock_trace lock irq_context: 0 rcu_read_lock_trace lock btf_idr_lock irq_context: 0 rcu_read_lock_trace lock btf_idr_lock pool_lock#2 irq_context: 0 rcu_read_lock_trace &newf->file_lock irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 rcu_read_lock_trace rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_trace lock map_idr_lock irq_context: 0 rcu_read_lock_trace &map->freeze_mutex irq_context: 0 key_types_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem asymmetric_key_parsers_sem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &c->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &____s->seqcount irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem kthread_create_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &x->wait irq_context: 0 key_types_sem asymmetric_key_parsers_sem 
(crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &x->wait#21 irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (&timer.timer) irq_context: 0 key_types_sem asymmetric_key_parsers_sem &c->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 key_types_sem asymmetric_key_parsers_sem &____s->seqcount irq_context: 0 key_types_sem &type->lock_class irq_context: 0 key_types_sem &type->lock_class fs_reclaim irq_context: 0 key_types_sem &type->lock_class fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem &type->lock_class &c->lock irq_context: 0 key_types_sem &type->lock_class &pcp->lock &zone->lock irq_context: 0 key_types_sem &type->lock_class &zone->lock irq_context: 0 key_types_sem &type->lock_class &____s->seqcount irq_context: 0 key_types_sem &type->lock_class pool_lock#2 irq_context: 0 key_types_sem &type->lock_class key_user_lock irq_context: 0 key_types_sem &type->lock_class crngs.lock irq_context: 0 key_types_sem &type->lock_class key_serial_lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex irq_context: 0 key_types_sem &type->lock_class key_construction_mutex &obj_hash[i].lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex pool_lock#2 irq_context: 0 key_types_sem &type->lock_class ima_keys_lock irq_context: 0 key_types_sem &obj_hash[i].lock irq_context: 0 key_types_sem pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex crypto_alg_sem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock free_vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock init_mm.page_table_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &c->lock irq_context: 0 slab_mutex lock irq_context: 0 slab_mutex lock kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem irq_context: 0 slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 slab_mutex &k->list_lock irq_context: 0 slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 slab_mutex lock kernfs_idr_lock &c->lock irq_context: 0 slab_mutex lock kernfs_idr_lock 
&____s->seqcount irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 (wq_completion)events (debug_obj_work).work pool_lock#2 irq_context: 0 (wq_completion)events (debug_obj_work).work &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (debug_obj_work).work &zone->lock irq_context: 0 (wq_completion)events (debug_obj_work).work &____s->seqcount irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (debug_obj_work).work &obj_hash[i].lock irq_context: 0 slab_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 slab_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pcpu_drain_mutex &pcp->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 reading_mutex &rq->__lock irq_context: 0 reading_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq|softirq &x->wait#12 &p->pi_lock irq_context: 0 reading_mutex rcu_read_lock &rq->__lock irq_context: 0 reading_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 (wq_completion)events netstamp_work irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 &x->wait#22 irq_context: 0 &x->wait#22 &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 bio_slab_lock slab_mutex &root->kernfs_rwsem irq_context: 0 bio_slab_lock slab_mutex &k->list_lock irq_context: 0 bio_slab_lock slab_mutex lock irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock irq_context: 0 bio_slab_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 bio_slab_lock slab_mutex &pcp->lock &zone->lock irq_context: 0 bio_slab_lock slab_mutex &zone->lock irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: softirq (&rxnet->peer_keepalive_timer) irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 init_user_ns.keyring_sem irq_context: 0 init_user_ns.keyring_sem key_user_lock irq_context: 0 init_user_ns.keyring_sem root_key_user.lock irq_context: 0 init_user_ns.keyring_sem fs_reclaim irq_context: 0 init_user_ns.keyring_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 init_user_ns.keyring_sem pool_lock#2 irq_context: 0 init_user_ns.keyring_sem crngs.lock irq_context: 0 init_user_ns.keyring_sem key_serial_lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex irq_context: 0 init_user_ns.keyring_sem &type->lock_class irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krxrpcd irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock root_key_user.lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &rxnet->peer_hash_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex keyring_name_lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex pool_lock#2 irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem keyring_serialise_link_lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex keyring_name_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 &x->wait#21 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 template_list irq_context: 0 idr_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem rcu_read_lock pool_lock#2 irq_context: 0 ima_extend_list_mutex irq_context: 0 ima_extend_list_mutex fs_reclaim irq_context: 0 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ima_extend_list_mutex pool_lock#2 irq_context: 0 pci_bus_sem irq_context: 0 clk_debug_lock irq_context: 0 (wq_completion)events_unbound deferred_probe_work irq_context: 0 (wq_completion)events_unbound deferred_probe_work deferred_probe_mutex irq_context: 0 deferred_probe_work irq_context: 0 dpm_list_mtx (console_sem).lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner_lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_mutex &root->kernfs_rwsem irq_context: 0 console_mutex kernfs_notify_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 console_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 k-sk_lock-AF_INET irq_context: 0 k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 k-slock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &cfs_rq->removed.lock irq_context: 0 reg_requests_lock irq_context: 0 (wq_completion)events reg_work irq_context: 0 (wq_completion)events reg_work rtnl_mutex irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events reg_work 
rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) async_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock init_fs.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &____s->seqcount#4 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &____s->seqcount irq_context: 0 
(wq_completion)events (work_completion)(&fw_work->work) (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 detector_work irq_context: 0 &wq->mutex &pool->lock/1 irq_context: 0 &wq->mutex &x->wait#10 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 acpi_gpio_deferred_req_irqs_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner irq_context: softirq fs/file_table.c:431 irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (delayed_fput_work).work irq_context: 0 (wq_completion)events (delayed_fput_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (delayed_fput_work).work &rq->__lock irq_context: 0 (wq_completion)events (delayed_fput_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events 
(work_completion)(&fw_work->work) umhelper_sem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#9 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem bus_type_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem sysfs_symlink_target_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &dev->power.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dpm_list_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#80 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock 
&pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem running_helpers_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#23 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (delayed_fput_work).work pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#2 irq_context: 0 tomoyo_ss &c->lock irq_context: 0 tomoyo_ss &____s->seqcount irq_context: 0 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 tomoyo_ss &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 tomoyo_ss tomoyo_log_lock irq_context: 0 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 cdev_lock irq_context: 0 tty_mutex (console_sem).lock irq_context: 0 tty_mutex console_lock irq_context: 0 tty_mutex fs_reclaim irq_context: 0 tty_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex &pcp->lock &zone->lock irq_context: 0 tty_mutex &zone->lock irq_context: 0 tty_mutex &____s->seqcount irq_context: 0 tty_mutex pool_lock#2 irq_context: 0 tty_mutex rcu_read_lock pool_lock#2 irq_context: 0 tty_mutex &obj_hash[i].lock irq_context: 0 tty_mutex tty_ldiscs_lock irq_context: 0 tty_mutex &k->list_lock irq_context: 0 tty_mutex &k->k_lock irq_context: 0 tty_mutex &tty->legacy_mutex irq_context: 0 tty_mutex &tty->legacy_mutex &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem pool_lock#2 irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &c->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &____s->seqcount irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock &obj_hash[i].lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock pool_lock#2 irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &pcp->lock &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->termios_rwsem 
irq_context: 0 &tty->legacy_mutex irq_context: 0 &tty->legacy_mutex &tty->files_lock irq_context: 0 &tty->legacy_mutex &port->lock irq_context: 0 &tty->legacy_mutex &port->mutex irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex &____s->seqcount irq_context: 0 &tty->legacy_mutex &port->mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &port_lock_key irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &i->lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 &tty->legacy_mutex &port->mutex register_lock irq_context: 0 &tty->legacy_mutex &port->mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_inum_ida.xa_lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: hardirq &i->lock irq_context: 0 &tty->legacy_mutex &port_lock_key irq_context: 0 detected_devices_mutex irq_context: 0 sb_writers#2 irq_context: 0 sb_writers#2 mount_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock 
irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 tomoyo_ss &zone->lock irq_context: 0 &sb->s_type->i_mutex_key rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &zone->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 tomoyo_ss file_systems_lock irq_context: 0 tomoyo_ss fs_reclaim irq_context: 0 tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#26/1 &zone->lock irq_context: 0 &type->s_umount_key#26/1 &____s->seqcount irq_context: 0 &type->s_umount_key#26/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#26/1 lock#4 irq_context: 0 &type->s_umount_key#26/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#26/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#26/1 &dd->lock irq_context: 0 &type->s_umount_key#26/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#26/1 &rq->__lock irq_context: 0 &type->s_umount_key#26/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#26/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 lock#5 irq_context: 0 &type->s_umount_key#26/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#26/1 lock#3 irq_context: 0 &type->s_umount_key#26/1 lock#3 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 lock#3 &rq->__lock irq_context: 0 &type->s_umount_key#26/1 lock#3 (work_completion)(work) irq_context: 0 &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#26/1 sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 sb_lock irq_context: 0 &type->s_umount_key#27/1 irq_context: 0 &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 &zone->lock irq_context: 0 &type->s_umount_key#27/1 &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#27/1 lock#4 irq_context: 0 &type->s_umount_key#27/1 &c->lock irq_context: 0 &type->s_umount_key#27/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#27/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#27/1 &dd->lock irq_context: 0 &type->s_umount_key#27/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#27/1 &rq->__lock irq_context: 0 &type->s_umount_key#27/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#27/1 
&sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#27/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#27/1 lock#5 irq_context: 0 &type->s_umount_key#27/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#27/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#27/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#27/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#27/1 inode_hash_lock irq_context: 0 &type->s_umount_key#27/1 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#27/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &dd->lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &rsp->gp_wait irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#7 &c->lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#7 &zone->lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#27/1 proc_inum_ida.xa_lock irq_context: 0 &type->s_umount_key#27/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#27/1 &journal->j_state_lock irq_context: 0 &type->s_umount_key#27/1 kthread_create_lock irq_context: 0 &type->s_umount_key#27/1 &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 &x->wait irq_context: 0 &type->s_umount_key#27/1 &journal->j_wait_done_commit irq_context: 0 &journal->j_wait_done_commit irq_context: 0 &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit irq_context: 0 
&journal->j_state_lock &journal->j_wait_commit irq_context: 0 &type->s_umount_key#27/1 &journal->j_state_lock irq_context: 0 &type->s_umount_key#27/1 &p->alloc_lock irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &type->s_umount_key#27/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#27/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#27/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->s_umount_key#27/1 &ei->i_es_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &c->lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &n->list_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &k->list_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#27/1 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#27/1 ext4_li_mtx irq_context: 0 &type->s_umount_key#27/1 lock irq_context: 0 &type->s_umount_key#27/1 lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#27/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#27/1 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#27/1 lock kernfs_idr_lock &c->lock irq_context: 0 &type->s_umount_key#27/1 lock kernfs_idr_lock &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 
lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 lock kernfs_idr_lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 (console_sem).lock irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner_lock irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#27/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock quarantine_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_state.barrier_mutex irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (init_mm).mmap_lock irq_context: 0 &type->s_umount_key#28/1 irq_context: 0 &type->s_umount_key#28/1 fs_reclaim irq_context: 0 &type->s_umount_key#28/1 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#28/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#28/1 sb_lock irq_context: 0 &type->s_umount_key#28/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#28/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#28/1 &zone->lock irq_context: 0 &type->s_umount_key#28/1 &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 &c->lock irq_context: 0 &type->s_umount_key#28/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#28/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &zone->lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 &dentry->d_lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 irq_context: 0 sb_writers#3 mount_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 
&sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 sysctl_lock irq_context: 0 sb_writers#3 fs_reclaim irq_context: 0 sb_writers#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 pool_lock#2 irq_context: 0 sb_writers#3 &obj_hash[i].lock irq_context: 0 sb_writers#3 &h->resize_lock irq_context: 0 sb_writers#3 &h->resize_lock free_hpage_work irq_context: 0 sb_writers#3 &h->resize_lock hugetlb_lock irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#3 &h->resize_lock &____s->seqcount irq_context: 0 sb_writers#3 &h->resize_lock pool_lock#2 irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 hugetlb_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex init_fs.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &sig->cred_guard_mutex 
&type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#7 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex 
&type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 irq_context: 0 &sig->cred_guard_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex aa_buffers_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock 
mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_wait.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &meta->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex binfmt_lock irq_context: 0 &sig->cred_guard_mutex entries_lock irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock &lru->node[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 
&sig->cred_guard_mutex &sig->exec_update_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &sighand->siglock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &newf->file_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock irq_context: 0 batched_entropy_u16.lock irq_context: 0 batched_entropy_u16.lock crngs.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex &ei->xattr_sem irq_context: 0 &iint->mutex fs_reclaim irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex &c->lock irq_context: 0 &iint->mutex &pcp->lock &zone->lock irq_context: 0 &iint->mutex &zone->lock irq_context: 0 &iint->mutex &____s->seqcount irq_context: 0 &iint->mutex pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock 
rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &iint->mutex &folio_wait_table[i] irq_context: 0 &iint->mutex &rq->__lock irq_context: 0 &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &iint->mutex &obj_hash[i].lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &iint->mutex ima_extend_list_mutex irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 binfmt_lock irq_context: 0 &dentry->d_lock &lru->node[i].lock irq_context: 0 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#29 irq_context: 0 &type->s_umount_key#29 shrinker_rwsem irq_context: 0 &type->s_umount_key#29 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#29 &dentry->d_lock irq_context: 0 &type->s_umount_key#29 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->s_umount_key#29 rename_lock.seqcount irq_context: 0 &type->s_umount_key#29 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#29 &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#29 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &type->s_umount_key#29 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#29 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#29 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#29 sysctl_lock irq_context: 0 &type->s_umount_key#29 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#29 pool_lock#2 irq_context: 0 &type->s_umount_key#29 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#29 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#29 sb_lock irq_context: 0 unnamed_dev_ida.xa_lock irq_context: 0 &xa->xa_lock#7 irq_context: 0 prog_idr_lock irq_context: 0 prog_idr_lock &obj_hash[i].lock irq_context: 0 prog_idr_lock pool_lock#2 irq_context: hardirq &rq->__lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 map_idr_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 btf_idr_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 btf_idr_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 btf_idr_lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) irq_context: 0 
(wq_completion)events_unbound (work_completion)(&map->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &vma->vm_lock->lock fs_reclaim irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock irq_context: 0 &vma->vm_lock->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock 
&wq#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &iint->mutex rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock irq_context: 0 &f->f_pos_lock &mm->mmap_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock kfence_freelist_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 &iint->mutex batched_entropy_u8.lock irq_context: 0 &iint->mutex kfence_freelist_lock irq_context: 0 &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 &iint->mutex ima_extend_list_mutex &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem quarantine_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 &mm->mmap_lock 
&mapping->i_mmap_rwsem &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start irq_context: 0 &port->mutex irq_context: 0 &tty->ldisc_sem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &port->mutex irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->read_wait irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 integrity_iint_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 &sb->s_type->i_lock_key#23 irq_context: 0 &f->f_pos_lock &p->lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock irq_context: 0 &type->s_umount_key#30/1 irq_context: 0 &type->s_umount_key#30/1 fs_reclaim irq_context: 0 &type->s_umount_key#30/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#30/1 pool_lock#2 irq_context: 0 &type->s_umount_key#30/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#30/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#30/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#30/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#30/1 sb_lock irq_context: 0 &type->s_umount_key#30/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#30/1 &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#30/1 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#30/1 &dentry->d_lock irq_context: 0 &root->kernfs_iattr_rwsem irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
&type->i_mutex_dir_key#4 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq irq_context: 0 &ent->pde_unload_lock irq_context: 0 &f->f_pos_lock &p->lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock file_systems_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 &x->wait#25 irq_context: 0 &mm->mmap_lock resource_lock irq_context: 0 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock clock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &____s->seqcount#3 irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->wait_chldexit irq_context: 0 tasklist_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock 
&p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock crngs.lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 key irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &mm->mmap_lock &p->alloc_lock irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &memcg->mm_list.lock irq_context: 0 tasklist_lock &sighand->siglock &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock &____s->seqcount#5 irq_context: 0 &prev->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 sb_writers#4 irq_context: 0 sb_writers#4 mount_lock irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &mapping->private_lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &wb->list_lock irq_context: 0 sb_writers#3 &wb->list_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pid->lock irq_context: 0 &p->alloc_lock &fs->lock &dentry->d_lock irq_context: 0 &f->f_pos_lock &p->lock namespace_sem irq_context: 0 &f->f_pos_lock &p->lock namespace_sem &new_ns->ns_lock irq_context: 0 &f->f_pos_lock &p->lock namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &f->f_pos_lock &p->lock namespace_sem rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#31 irq_context: 0 &type->s_umount_key#31 &lru->node[i].lock irq_context: 0 &type->s_umount_key#31 &dentry->d_lock irq_context: 0 &type->s_umount_key#31 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#31 &sb->s_type->i_lock_key#22 &lru->node[i].lock irq_context: 0 &type->s_umount_key#31 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 pool_lock#2 irq_context: 0 &type->s_umount_key#31 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#31 &journal->j_state_lock irq_context: 0 &type->s_umount_key#31 &p->alloc_lock irq_context: 0 &type->s_umount_key#31 (work_completion)(&sbi->s_error_work) irq_context: 0 &type->s_umount_key#31 &journal->j_state_lock irq_context: 0 &type->s_umount_key#31 key#3 irq_context: 0 &type->s_umount_key#31 key#4 irq_context: 0 &type->s_umount_key#31 
&sbi->s_error_lock irq_context: 0 &type->s_umount_key#31 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 &base->lock irq_context: 0 &type->s_umount_key#31 &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#31 &dd->lock irq_context: 0 &type->s_umount_key#31 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#31 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 bit_wait_table + i irq_context: 0 &type->s_umount_key#31 &rq->__lock irq_context: 0 &type->s_umount_key#31 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock irq_context: softirq &fq->mq_flush_lock tk_core.seq.seqcount irq_context: softirq &fq->mq_flush_lock &q->requeue_lock irq_context: softirq &fq->mq_flush_lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &q->requeue_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx irq_context: 0 &type->s_umount_key#31 ext4_li_mtx fs_reclaim irq_context: 0 &type->s_umount_key#31 ext4_li_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31 ext4_li_mtx pool_lock#2 irq_context: 0 &type->s_umount_key#31 ext4_li_mtx batched_entropy_u16.lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &eli->li_list_mtx irq_context: 0 &type->s_umount_key#31 ext4_li_mtx kthread_create_lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &p->pi_lock irq_context: 0 
&type->s_umount_key#31 ext4_li_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &rq->__lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &x->wait irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &obj_hash[i].lock irq_context: 0 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#31 (console_sem).lock irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner_lock irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#31 mount_lock irq_context: 0 &type->s_umount_key#31 mount_lock mount_lock.seqcount irq_context: 0 &type->s_umount_key#31 mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem irq_context: 0 namespace_sem &new_ns->ns_lock irq_context: 0 rcu_read_lock &pid->lock irq_context: 0 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 rename_lock.seqcount irq_context: 0 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &pid->lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &____s->seqcount irq_context: 0 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 jbd2_handle &c->lock irq_context: 0 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->list_lock irq_context: 0 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->work_lock irq_context: 0 sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal rcu_read_lock init_fs.seq.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_internal pool_lock#2 irq_context: 0 sb_internal &journal->j_state_lock irq_context: 0 sb_internal jbd2_handle irq_context: 0 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_internal jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal &obj_hash[i].lock irq_context: 0 &ei->i_data_sem irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sighand->siglock hrtimer_bases.lock irq_context: 0 &sighand->siglock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &sighand->siglock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 file_rwsem irq_context: 0 file_rwsem &ctx->flc_lock irq_context: 0 file_rwsem &ctx->flc_lock &fll->lock irq_context: 0 &ctx->flc_lock irq_context: 0 &sig->cred_guard_mutex tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 mount_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->list_lock irq_context: 0 
&sig->cred_guard_mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss quarantine_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 mount_lock irq_context: 0 &f->f_pos_lock sb_writers#4 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &c->lock irq_context: 0 
&sig->cred_guard_mutex rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&cb->timer) irq_context: softirq (&cb->timer) &obj_hash[i].lock irq_context: softirq (&cb->timer) &base->lock irq_context: softirq (&cb->timer) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mount_lock irq_context: 0 &iint->mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &iint->mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 rcu_read_lock &p->alloc_lock irq_context: 0 &type->s_umount_key#32/1 irq_context: 0 &type->s_umount_key#32/1 fs_reclaim irq_context: 0 &type->s_umount_key#32/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 pool_lock#2 irq_context: 0 &type->s_umount_key#32/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#32/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#32/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#32/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#32/1 sb_lock irq_context: 0 &type->s_umount_key#32/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#32/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#32/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#32/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 fs_reclaim irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 pool_lock#2 irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32/1 
&sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#32/1 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 fs_reclaim irq_context: 0 &type->s_umount_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33 irq_context: 0 &type->s_umount_key#33 sb_lock irq_context: 0 &type->s_umount_key#33 fs_reclaim irq_context: 0 &type->s_umount_key#33 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33 pool_lock#2 irq_context: 0 &type->s_umount_key#33 &dentry->d_lock irq_context: 0 &type->s_umount_key#33 &lru->node[i].lock irq_context: 0 &type->s_umount_key#33 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#33 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 tomoyo_ss &rq->__lock irq_context: 0 &type->s_umount_key#34 irq_context: 0 &type->s_umount_key#34 sb_lock irq_context: 0 &type->s_umount_key#34 &dentry->d_lock irq_context: 0 rcu_read_lock 
&dentry->d_lock sysctl_lock irq_context: 0 &type->s_umount_key#35/1 irq_context: 0 &type->s_umount_key#35/1 fs_reclaim irq_context: 0 &type->s_umount_key#35/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#35/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#35/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#35/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#35/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#35/1 sb_lock irq_context: 0 &type->s_umount_key#35/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#35/1 &c->lock irq_context: 0 &type->s_umount_key#35/1 &____s->seqcount irq_context: 0 &type->s_umount_key#35/1 pool_lock#2 irq_context: 0 &type->s_umount_key#35/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#35/1 &sb->s_type->i_lock_key#26 irq_context: 0 &type->s_umount_key#35/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#35/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#35/1 &sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 &type->s_umount_key#35/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 redirect_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock pool_lock#2 irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &tty->ldisc_sem 
&tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &rq->__lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &port_lock_key irq_context: hardirq &i->lock &port_lock_key irq_context: hardirq &i->lock &port_lock_key &port->lock irq_context: hardirq &i->lock &port_lock_key &tty->write_wait irq_context: hardirq &i->lock &port_lock_key &tty->write_wait &p->pi_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock hrtimer_bases.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &port_lock_key irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &port_lock_key &dev->power.lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->files_lock irq_context: 0 &tty->ldisc_sem &tty->write_wait irq_context: 0 &type->s_umount_key#36/1 irq_context: 0 &type->s_umount_key#36/1 fs_reclaim irq_context: 0 &type->s_umount_key#36/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#36/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#36/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#36/1 sb_lock irq_context: 0 &type->s_umount_key#36/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#36/1 &____s->seqcount irq_context: 0 &type->s_umount_key#36/1 pool_lock#2 irq_context: 0 &type->s_umount_key#36/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#36/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#36/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#27 irq_context: 0 &type->s_umount_key#36/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#36/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#27 &dentry->d_lock irq_context: 0 &type->s_umount_key#36/1 fuse_mutex irq_context: 0 &type->s_umount_key#36/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#37/1 irq_context: 0 &type->s_umount_key#37/1 fs_reclaim irq_context: 0 &type->s_umount_key#37/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 pool_lock#2 irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#37/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#37/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#37/1 &c->lock irq_context: 0 &type->s_umount_key#37/1 &____s->seqcount irq_context: 0 &type->s_umount_key#37/1 sb_lock irq_context: 0 &type->s_umount_key#37/1 sb_lock 
unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#37/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#28 irq_context: 0 &type->s_umount_key#37/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#37/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#28 &dentry->d_lock irq_context: 0 &type->s_umount_key#37/1 pstore_sb_lock irq_context: 0 &type->s_umount_key#37/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#38/1 irq_context: 0 &type->s_umount_key#38/1 fs_reclaim irq_context: 0 &type->s_umount_key#38/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#38/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#38/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#38/1 sb_lock irq_context: 0 &type->s_umount_key#38/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#38/1 pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#29 irq_context: 0 &type->s_umount_key#38/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#38/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#29 &dentry->d_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock (kmod_concurrent_max).lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock fs_reclaim irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &x->wait#17 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &rq->__lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss quarantine_lock irq_context: 0 uts_sem irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &meta->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock running_helpers_waitq.lock irq_context: 0 &type->s_umount_key#38/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#39 irq_context: 0 &type->s_umount_key#39 sb_lock irq_context: 0 &type->s_umount_key#39 fs_reclaim irq_context: 0 &type->s_umount_key#39 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39 pool_lock#2 irq_context: 0 &type->s_umount_key#39 &dentry->d_lock irq_context: 0 &type->s_umount_key#39 &lru->node[i].lock irq_context: 0 &type->s_umount_key#39 rcu_read_lock &dentry->d_lock 
irq_context: 0 &type->s_umount_key#39 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem &____s->seqcount irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key irq_context: 0 &type->i_mutex_dir_key#5 irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq irq_context: 0 sb_writers#5 irq_context: 0 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key irq_context: 0 &sb->s_type->i_mutex_key#12 irq_context: 0 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock 
mount_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount irq_context: 0 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &c->lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount irq_context: 0 key#5 irq_context: 0 tomoyo_ss batched_entropy_u8.lock irq_context: 0 tomoyo_ss kfence_freelist_lock irq_context: 0 tomoyo_ss &meta->lock irq_context: 0 uts_sem irq_context: 0 uts_sem hostname_poll.wait.lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 pool_lock#2 irq_context: 0 &f->f_pos_lock 
&type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &fs->lock &dentry->d_lock irq_context: 0 dup_mmap_sem irq_context: 0 dup_mmap_sem &mm->mmap_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->page_table_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock irq_context: 0 &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock 
&vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &memcg->mm_list.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pgd_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pcpu_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#5 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &xattrs->lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &p->alloc_lock &x->wait#25 irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock 
irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &obj_hash[i].lock irq_context: 0 &sighand->siglock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock percpu_counters_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock mount_lock.seqcount irq_context: 0 &u->iolock irq_context: 0 &u->iolock rlock-AF_UNIX irq_context: 0 &ei->socket.wq.wait irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: hardirq &dev->power.lock hrtimer_bases.lock irq_context: hardirq &dev->power.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex 
&type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rcu_read_lock &p->alloc_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#3 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &dentry->d_lock &lru->node[i].lock irq_context: 0 &bsd_socket_locks[i] irq_context: 0 sb_writers tk_core.seq.seqcount irq_context: 0 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &wb->list_lock irq_context: 0 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &u->lock irq_context: 0 &u->lock &u->lock/1 irq_context: 0 
&mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &group->mark_mutex irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex fs_reclaim irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &group->mark_mutex &____s->seqcount irq_context: 0 &group->mark_mutex pool_lock#2 irq_context: 0 &group->mark_mutex &c->lock irq_context: 0 &group->mark_mutex lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 &group->mark_mutex ucounts_lock irq_context: 0 &group->mark_mutex &mark->lock irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu &conn->lock irq_context: 0 &group->mark_mutex &mark->lock &conn->lock irq_context: 0 &group->mark_mutex &conn->lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock/1 irq_context: 0 rcu_read_lock pgd_lock irq_context: 0 rcu_read_lock key irq_context: 0 rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &fsnotify_mark_srcu &conn->lock irq_context: 0 &conn->lock irq_context: 0 &evdev->client_lock irq_context: 0 &evdev->mutex irq_context: 0 &evdev->mutex &dev->mutex#2 irq_context: 0 &evdev->mutex &mm->mmap_lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_node_0 irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock 
&mm->mmap_lock/1 &rcu_state.expedited_wq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex pool_lock#2 irq_context: 0 &evdev->mutex &dev->mutex#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 slock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rlock-AF_NETLINK irq_context: 0 cb_lock fs_reclaim irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock pool_lock#2 irq_context: 0 cb_lock rlock-AF_NETLINK irq_context: 0 rlock-AF_NETLINK irq_context: 0 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket rhashtable_bucket/1 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 genl_sk_destructing_waitq.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &k->k_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->beacon_registrations_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->mgmt_registrations_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &wdev->pmsr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem reg_indoor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem hwsim_radio_lock irq_context: 0 nl_table_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock irq_context: 0 sb_writers#6 irq_context: 0 sb_writers#6 mount_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &c->lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock 
&c->lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tk_core.seq.seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &sb->s_type->i_lock_key#8 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock &sb->s_type->i_lock_key#8 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 &u->lock &sk->sk_peer_lock irq_context: 0 &u->lock rlock-AF_UNIX irq_context: 0 rcu_read_lock &ei->socket.wq.wait irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock irq_context: 0 &u->iolock &obj_hash[i].lock irq_context: 0 &u->iolock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock 
&type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: softirq rcu_callback rlock-AF_NETLINK irq_context: softirq rcu_callback &dir->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 &group->notification_waitq irq_context: 0 &group->notification_lock irq_context: 0 &client->wait irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &u->lock/1 irq_context: 0 syslog_lock irq_context: 0 &u->iolock &meta->lock irq_context: 0 &u->iolock kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock key#6 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &sb->s_type->i_lock_key#14 irq_context: 0 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &pipe->mutex/1 irq_context: 0 &pipe->rd_wait irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &____s->seqcount irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 
&type->s_umount_key#31 sb_writers#4 &____s->seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#31 sb_writers#4 lock#4 irq_context: 0 &type->s_umount_key#31 sb_writers#4 &mapping->private_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 &dd->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 &c->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock &dd->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 bit_wait_table + i irq_context: 0 &type->s_umount_key#31 sb_writers#4 &rq->__lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#31 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &dd->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &x->wait#26 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_node_0 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 
&type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock quarantine_lock irq_context: softirq &x->wait#26 irq_context: softirq &x->wait#26 &p->pi_lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem (&timer.timer) irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->s_umount_key#31 sb_writers#4 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 
&mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 &u->lock clock-AF_UNIX irq_context: 0 &u->peer_wait irq_context: 0 rlock-AF_UNIX irq_context: 0 syslog_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &rq->__lock irq_context: 0 &pipe->mutex/1 &lock->wait_lock irq_context: 0 &pipe->mutex/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->wr_wait irq_context: 0 &lock->wait_lock irq_context: 0 &pipe->mutex/1 fs_reclaim irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &____s->seqcount irq_context: 0 &pipe->mutex/1 pool_lock#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock irq_context: 0 &pipe->rd_wait &p->pi_lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 tk_core.seq.seqcount irq_context: 0 sb_writers#7 mount_lock irq_context: 0 &pipe->mutex/1 &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 key#9 irq_context: 0 &u->iolock &rq->__lock irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 &obj_hash[i].lock irq_context: 0 &u->lock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount 
&dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 &type->s_umount_key#31 sb_writers#4 batched_entropy_u8.lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 kfence_freelist_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 &meta->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 sb_writers#3 &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss irq_context: 0 sb_writers#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#3 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#3 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#3 irq_context: 0 &f->f_pos_lock sb_writers#3 sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#3 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#3 &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock &c->lock irq_context: softirq (&cb->timer) tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 &iint->mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &iint->mutex sb_writers#4 &meta->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem ptlock_ptr(page) irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &iint->mutex 
mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock &p->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 &eli->li_list_mtx &obj_hash[i].lock irq_context: 0 &eli->li_list_mtx pool_lock#2 irq_context: 0 ext4_li_mtx irq_context: 0 ext4_li_mtx &eli->li_list_mtx irq_context: 0 ext4_li_mtx &obj_hash[i].lock irq_context: 0 ext4_li_mtx pool_lock#2 irq_context: 0 sk_lock-AF_UNIX irq_context: 0 sk_lock-AF_UNIX slock-AF_UNIX irq_context: 0 slock-AF_UNIX irq_context: hardirq log_wait.lock &p->pi_lock irq_context: hardirq log_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &ei->xattr_sem irq_context: 0 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xattrs->lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 
&dentry->d_lock &wq#2 irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex &p->alloc_lock irq_context: 0 &vma->vm_lock->lock &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 low_water_lock console_owner_lock irq_context: 0 low_water_lock console_owner irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &sk->sk_peer_lock irq_context: 0 &ep->mtx irq_context: 0 epnested_mutex irq_context: 0 epnested_mutex &ep->mtx irq_context: 0 epnested_mutex &ep->mtx fs_reclaim irq_context: 0 epnested_mutex &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx &____s->seqcount irq_context: 0 epnested_mutex &ep->mtx pool_lock#2 irq_context: 0 epnested_mutex &ep->mtx &c->lock irq_context: 0 epnested_mutex &ep->mtx &f->f_lock irq_context: 0 epnested_mutex &ep->mtx &ei->socket.wq.wait irq_context: 0 epnested_mutex &ep->mtx &ep->lock irq_context: 0 epnested_mutex rcu_read_lock &f->f_lock irq_context: 0 &ep->mtx fs_reclaim irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &f->f_lock irq_context: 0 &ep->mtx pool_lock#2 irq_context: 0 &ep->mtx &group->notification_waitq irq_context: 0 &ep->mtx &group->notification_lock irq_context: 0 &ep->mtx &ep->lock irq_context: 0 &ep->mtx &sighand->signalfd_wqh irq_context: 0 &ep->mtx &sighand->siglock irq_context: 0 &ep->mtx &ei->socket.wq.wait irq_context: 0 &ep->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &dentry->d_lock irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 &type->i_mutex_dir_key#4 
&root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 remove_cache_srcu &c->lock irq_context: 0 remove_cache_srcu &n->list_lock irq_context: 0 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss &n->list_lock irq_context: 0 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &root->kernfs_iattr_rwsem pgd_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 &root->kernfs_iattr_rwsem key irq_context: 0 &root->kernfs_iattr_rwsem pcpu_lock irq_context: 0 &root->kernfs_iattr_rwsem percpu_counters_lock irq_context: 0 &root->kernfs_iattr_rwsem pool_lock#2 irq_context: 0 swap_lock irq_context: 0 sb_writers#8 irq_context: 0 sb_writers#8 mount_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 
&root->kernfs_rwsem irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 kn->active fs_reclaim irq_context: 0 kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active pool_lock#2 irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#13 irq_context: 0 &f->f_pos_lock sb_writers#8 irq_context: 0 &f->f_pos_lock sb_writers#8 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex 
rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &f->f_pos_lock rcu_read_lock &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock pool_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock irq_context: 0 kn->active#2 fs_reclaim irq_context: 0 kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 &obj_hash[i].lock irq_context: 0 kn->active#2 &c->lock irq_context: 0 kn->active#2 &____s->seqcount irq_context: 0 kn->active#2 pool_lock#2 irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 kn->active &c->lock irq_context: 0 kn->active &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active uevent_sock_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active &n->list_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &____s->seqcount irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &c->lock irq_context: 0 
dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &ep->mtx &____s->seqcount irq_context: 0 &ep->mtx &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &c->lock irq_context: 0 kn->active#2 &n->list_lock irq_context: 0 kn->active#2 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#2 remove_cache_srcu irq_context: 0 kn->active#2 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#2 remove_cache_srcu &c->lock irq_context: 0 kn->active#2 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#2 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#2 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#2 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#2 rcu_read_lock &rq->__lock irq_context: 0 kn->active &n->list_lock irq_context: 0 kn->active &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 
tomoyo_ss remove_cache_srcu irq_context: 0 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override pool_lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &rq->__lock irq_context: 0 &f->f_pos_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &rq->__lock irq_context: 0 kn->active#2 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &n->list_lock irq_context: 0 remove_cache_srcu &rq->__lock irq_context: 0 kn->active#2 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 
&pcp->lock &zone->lock &____s->seqcount irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#2 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 kn->active#2 remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &vma->vm_lock->lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim &rq->__lock irq_context: 0 kn->active remove_cache_srcu irq_context: 0 kn->active remove_cache_srcu quarantine_lock irq_context: 0 kn->active remove_cache_srcu &c->lock irq_context: 0 kn->active remove_cache_srcu &n->list_lock 
irq_context: 0 kn->active remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active remove_cache_srcu &rq->__lock irq_context: 0 kn->active#3 &rq->__lock irq_context: 0 kn->active#3 fs_reclaim irq_context: 0 kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &rq->__lock irq_context: 0 kn->active#3 &c->lock irq_context: 0 kn->active#3 &____s->seqcount irq_context: 0 kn->active#3 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#3 rcu_read_lock &rq->__lock irq_context: 0 kn->active#3 &n->list_lock irq_context: 0 kn->active#3 &n->list_lock &c->lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#3 remove_cache_srcu irq_context: 0 kn->active#3 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#3 remove_cache_srcu &c->lock irq_context: 0 kn->active#3 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#3 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex batched_entropy_u8.lock 
irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 rcu_read_lock &rq->__lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 quarantine_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 kn->active#3 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#3 
&kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 kn->active#4 fs_reclaim irq_context: 0 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#4 param_lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &dentry->d_lock irq_context: 0 sb_writers#8 tomoyo_ss irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 tomoyo_ss &c->lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 irq_context: 0 sb_writers#8 iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &____s->seqcount 
irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &c->lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &sem->wait_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#4 param_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#4 param_lock disk_events_mutex irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#4 kfence_freelist_lock irq_context: 0 sb_writers#8 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &wb->list_lock irq_context: 0 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 
&root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#5 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &info->lock irq_context: 0 sb_writers#5 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &xa->xa_lock#7 irq_context: 0 sb_writers#5 &obj_hash[i].lock irq_context: 0 sb_writers#5 pool_lock#2 irq_context: 0 sb_writers#5 &fsnotify_mark_srcu irq_context: 0 kn->active#5 fs_reclaim irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 fs_reclaim irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &c->lock irq_context: 0 
&f->f_pos_lock &p->lock &of->mutex kn->active#5 &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &____s->seqcount irq_context: 0 kn->active#5 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active#5 &____s->seqcount irq_context: 0 kn->active#5 &rq->__lock irq_context: 0 kn->active#5 &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 kn->active#6 fs_reclaim irq_context: 0 kn->active#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#7 fs_reclaim irq_context: 0 kn->active#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 fs_reclaim irq_context: 0 kn->active#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 &c->lock irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#9 fs_reclaim irq_context: 0 kn->active#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#9 &c->lock irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#9 
&kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#10 fs_reclaim irq_context: 0 kn->active#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#11 fs_reclaim irq_context: 0 kn->active#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#12 fs_reclaim irq_context: 0 kn->active#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 quarantine_lock irq_context: 0 kn->active#5 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &rq->__lock irq_context: 0 kn->active#6 &c->lock irq_context: 0 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 kn->active#10 &c->lock irq_context: 0 kn->active#10 &____s->seqcount irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#5 mount_lock irq_context: 0 &f->f_pos_lock sb_writers#5 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &f->f_pos_lock sb_writers#5 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sem->wait_lock irq_context: 0 sb_writers#5 &p->pi_lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#13 fs_reclaim 
irq_context: 0 kn->active#13 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#6 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 kn->active#14 fs_reclaim irq_context: 0 kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#14 &c->lock irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#14 fs_reclaim irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#14 pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#14 &obj_hash[i].lock irq_context: 0 kn->active#9 &n->list_lock irq_context: 0 kn->active#9 &n->list_lock &c->lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sem->wait_lock irq_context: 0 kn->active#12 &c->lock irq_context: 0 &f->f_pos_lock &p->lock &rq->__lock irq_context: 0 kn->active#14 &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 udc_lock irq_context: 0 kn->active#5 remove_cache_srcu irq_context: 0 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 rcu_state.exp_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu 
&group->notification_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &rq->__lock irq_context: 0 kn->active#11 &c->lock irq_context: 0 kn->active#11 &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 fw_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sem->wait_lock irq_context: 0 
sb_writers#5 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#15 fs_reclaim irq_context: 0 kn->active#15 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#15 dev_base_lock irq_context: 0 kn->active#16 fs_reclaim irq_context: 0 kn->active#16 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#16 dev_base_lock irq_context: 0 kn->active#17 fs_reclaim irq_context: 0 kn->active#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &rq->__lock irq_context: 0 kn->active#18 fs_reclaim irq_context: 0 kn->active#18 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#18 &c->lock irq_context: 0 kn->active#18 &n->list_lock irq_context: 0 kn->active#18 &n->list_lock &c->lock irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#19 fs_reclaim irq_context: 0 kn->active#19 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#20 fs_reclaim irq_context: 0 kn->active#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#21 fs_reclaim irq_context: 0 kn->active#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#21 &c->lock irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#21 
dev_base_lock irq_context: 0 kn->active#22 fs_reclaim irq_context: 0 kn->active#22 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#22 dev_base_lock irq_context: 0 kn->active#23 fs_reclaim irq_context: 0 kn->active#23 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#23 dev_base_lock irq_context: 0 kn->active#24 fs_reclaim irq_context: 0 kn->active#24 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#24 pool_lock#2 irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock fs_reclaim irq_context: 0 &f->f_pos_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &of->mutex irq_context: 0 &f->f_pos_lock &of->mutex kn->active#24 &dev->power.lock irq_context: 0 &f->f_pos_lock &of->mutex kn->active#24 pci_lock irq_context: 0 &f->f_pos_lock &of->mutex kn->active#24 pci_lock pci_config_lock irq_context: 0 &f->f_pos_lock &obj_hash[i].lock irq_context: 0 kn->active#25 fs_reclaim irq_context: 0 kn->active#25 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 fs_reclaim irq_context: 0 kn->active#26 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &pipe->rd_wait irq_context: 0 kn->active#27 fs_reclaim irq_context: 0 kn->active#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &c->lock irq_context: 0 kn->active#28 &rq->__lock irq_context: 0 kn->active#28 fs_reclaim irq_context: 0 kn->active#28 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 kn->active#29 fs_reclaim irq_context: 0 kn->active#29 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 fs_reclaim irq_context: 0 kn->active#30 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 &c->lock irq_context: 0 kn->active#30 &n->list_lock irq_context: 0 kn->active#30 &n->list_lock &c->lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#31 fs_reclaim irq_context: 0 kn->active#31 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 fs_reclaim irq_context: 0 kn->active#32 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 fs_reclaim irq_context: 0 kn->active#33 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#31 &c->lock irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#32 &c->lock irq_context: 0 kn->active#32 &n->list_lock irq_context: 0 kn->active#32 &n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&sig->cred_guard_mutex sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 (&journal->j_commit_timer) irq_context: 0 &journal->j_checkpoint_mutex irq_context: 0 &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 &journal->j_checkpoint_mutex &dd->lock irq_context: 0 &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_updates irq_context: 0 &journal->j_list_lock irq_context: 0 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &ei->i_es_lock irq_context: 0 lock#4 irq_context: 0 lock#4 &lruvec->lru_lock irq_context: 0 &mapping->private_lock irq_context: 0 &ret->b_state_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock irq_context: 0 &ei->i_es_lock key#2 irq_context: 0 &dd->lock irq_context: 0 &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 rcu_read_lock &dd->lock irq_context: 0 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &dd->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &dd->lock irq_context: softirq &(&wb->dwork)->timer irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock 
&pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &memcg->move_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_md_lock irq_context: 0 &journal->j_fc_wait irq_context: 0 &journal->j_history_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)writeback irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &p->sequence irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#31 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 
pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock irq_context: 0 kn->active#29 &c->lock irq_context: 0 kn->active#28 &c->lock irq_context: 0 kn->active#30 remove_cache_srcu irq_context: 0 kn->active#30 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#30 remove_cache_srcu &c->lock irq_context: 0 kn->active#30 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#30 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#31 &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock pool_lock#2 irq_context: 0 kn->active#27 &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: softirq rcu_read_lock &memcg->move_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 irq_context: softirq rcu_read_lock &xa->xa_lock#7 &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &base->lock 
&obj_hash[i].lock irq_context: 0 kn->active#33 &c->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 key#10 irq_context: softirq rcu_read_lock &xa->xa_lock#7 key#11 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 udc_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock &c->lock irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 fw_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#12 &n->list_lock irq_context: 0 kn->active#12 &n->list_lock &c->lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 &hctx->lock irq_context: 0 rcu_read_lock &hctx->lock irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#6 &n->list_lock irq_context: 0 kn->active#6 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#14 &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#14 &c->lock irq_context: 0 kn->active#7 &c->lock irq_context: 0 rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 &pipe->rd_wait &ep->lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 
&pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rcu_read_lock &pipe->rd_wait irq_context: 0 &ep->mtx &obj_hash[i].lock irq_context: 0 &sighand->signalfd_wqh irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 &vma->vm_lock->lock pgd_lock irq_context: 0 &vma->vm_lock->lock key irq_context: 0 &vma->vm_lock->lock pcpu_lock irq_context: 0 &vma->vm_lock->lock percpu_counters_lock irq_context: 0 &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 kn->active#13 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#14 irq_context: 0 mapping.invalidate_lock#2 irq_context: 0 mapping.invalidate_lock#2 mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock#2 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#7 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#7 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#7 &c->lock irq_context: 0 mapping.invalidate_lock#2 lock#4 irq_context: 0 mapping.invalidate_lock#2 tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &base->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &base->lock 
&obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock &c->lock irq_context: 0 &mousedev->client_lock irq_context: 0 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &lock->wait_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 lock#4 &lruvec->lru_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rnp->exp_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rnp->exp_wq[0] irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 &rnp->exp_lock irq_context: 0 &evdev->mutex &dev->mutex#2 &rnp->exp_wq[1] irq_context: 0 &evdev->mutex &dev->mutex#2 &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 rcu_node_0 irq_context: 0 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &lock->wait_lock irq_context: 0 &evdev->mutex &lock->wait_lock irq_context: 0 &evdev->mutex &p->pi_lock irq_context: 0 &evdev->mutex &p->pi_lock &rq->__lock irq_context: 0 &evdev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mousedev->mutex#2 
&dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq &(&wb->bw_dwork)->timer irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) &wb->list_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#2 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sem->wait_lock irq_context: 0 sb_writers &p->pi_lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 &rq->__lock irq_context: 0 mapping.invalidate_lock#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &____s->seqcount irq_context: 0 kn->active#33 &n->list_lock irq_context: 0 kn->active#33 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 kn->active#31 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &rfkill->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss quarantine_lock irq_context: 0 &f->f_pos_lock &n->list_lock irq_context: 0 &f->f_pos_lock &n->list_lock &c->lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#31 &n->list_lock irq_context: 0 kn->active#31 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 
&dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &xa->xa_lock#7 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock &c->lock irq_context: 0 kn->active#33 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 kn->active#34 fs_reclaim irq_context: 0 kn->active#34 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#34 &c->lock irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 &c->lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &lock->wait_lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &lock->wait_lock irq_context: 0 kn->active#5 &p->pi_lock irq_context: 0 kn->active#5 &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#29 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &c->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock rcu_node_0 irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &rq->__lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page) irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &obj_hash[i].lock irq_context: 0 kn->active#27 remove_cache_srcu irq_context: 0 kn->active#27 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#35 fs_reclaim irq_context: 0 kn->active#35 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#35 &c->lock irq_context: 0 kn->active#35 
&kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#36 fs_reclaim irq_context: 0 kn->active#36 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#36 &c->lock irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 &n->list_lock irq_context: 0 kn->active#28 &n->list_lock &c->lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 kn->active#30 &____s->seqcount irq_context: 0 kn->active#37 fs_reclaim irq_context: 0 kn->active#37 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock &c->lock irq_context: 0 kn->active#37 &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#7 &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &lock->wait_lock irq_context: 0 
&f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#38 fs_reclaim irq_context: 0 kn->active#38 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#38 i2c_dev_list_lock irq_context: 0 &sig->cred_guard_mutex &lock->wait_lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &folio_wait_table[i] irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem quarantine_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock quarantine_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 &f->f_pos_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex key#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &n->list_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override 
&____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem kfence_freelist_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &meta->lock irq_context: 0 videodev_lock irq_context: 0 &dev_instance->mutex irq_context: 0 &dev_instance->mutex fs_reclaim irq_context: 0 &dev_instance->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev_instance->mutex pool_lock#2 irq_context: 0 &dev_instance->mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &dev_instance->mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &q->done_wq irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex &mdev->graph_mutex irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex pool_lock#2 irq_context: 0 &dev_instance->mutex &c->lock irq_context: 0 &dev_instance->mutex &n->list_lock irq_context: 0 &dev_instance->mutex &n->list_lock &c->lock irq_context: 0 fh->state->lock irq_context: 0 &vdev->fh_lock irq_context: 0 &dev->dev_mutex irq_context: 0 &dev->dev_mutex fs_reclaim irq_context: 0 &dev->dev_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->dev_mutex pool_lock#2 irq_context: 0 &dev->dev_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &dev->dev_mutex &____s->seqcount irq_context: 0 &dev->dev_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &dev->dev_mutex &vdev->fh_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock remove_cache_srcu irq_context: 0 tomoyo_ss tomoyo_policy_lock remove_cache_srcu quarantine_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock remove_cache_srcu &c->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock remove_cache_srcu &n->list_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex irq_context: 0 
&mdev->req_queue_mutex &dev->dev_mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->done_wq irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx &rq->__lock irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#39 fs_reclaim irq_context: 0 kn->active#39 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#39 &c->lock irq_context: 0 kn->active#39 &____s->seqcount irq_context: 0 kn->active#5 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &stopper->lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &x->wait#8 irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss quarantine_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 
&mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: 0 kn->active#5 batched_entropy_u8.lock irq_context: 0 kn->active#5 kfence_freelist_lock irq_context: 0 &vcapture->lock irq_context: 0 &mdev->graph_mutex irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 kn->active#37 &____s->seqcount irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#37 remove_cache_srcu irq_context: 0 kn->active#37 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#37 remove_cache_srcu &c->lock irq_context: 0 kn->active#37 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#37 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx key#12 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex 
&rq->__lock irq_context: 0 &mdev->req_queue_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem kfence_freelist_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mdev->req_queue_mutex &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock irq_context: 0 kn->active#37 &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 batched_entropy_u8.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 kfence_freelist_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock kfence_freelist_lock irq_context: 0 &u->bindlock irq_context: 0 &u->bindlock fs_reclaim irq_context: 0 &u->bindlock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->bindlock pool_lock#2 irq_context: 0 &u->bindlock batched_entropy_u32.lock irq_context: 0 &u->bindlock &net->unx.table.locks[i] irq_context: 0 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock irq_context: 0 &u->lock &u->lock/1 &dentry->d_lock irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock &sk->sk_peer_lock/1 irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock/1 irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &dentry->d_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&tbl->gc_work)->timer irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 kn->active#39 &n->list_lock irq_context: 0 kn->active#39 &n->list_lock &c->lock irq_context: 0 tasklist_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 tasklist_lock &sighand->siglock kfence_freelist_lock irq_context: 0 &sighand->siglock &meta->lock irq_context: 0 &sighand->siglock kfence_freelist_lock irq_context: 0 &f->f_pos_lock &p->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 lock pidmap_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock irq_context: softirq (&dom->period_timer) irq_context: softirq (&dom->period_timer) key#13 irq_context: softirq (&dom->period_timer) &p->sequence irq_context: softirq (&dom->period_timer) &obj_hash[i].lock irq_context: softirq (&dom->period_timer) &base->lock irq_context: softirq (&dom->period_timer) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &c->lock batched_entropy_u8.lock irq_context: 0 
&mm->mmap_lock &c->lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] 
&n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq drivers/base/dd.c:321 irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->list_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->k_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_mutex irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_work irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &x->wait#10 irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &pool->lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 &ep->mtx &obj_hash[i].lock pool_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock quarantine_lock irq_context: 0 kn->active#37 &n->list_lock irq_context: 0 kn->active#37 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock pool_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim pgd_lock irq_context: 0 fs_reclaim key irq_context: 0 fs_reclaim pcpu_lock irq_context: 0 fs_reclaim percpu_counters_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem quarantine_lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 tk_core.seq.seqcount 
irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4/4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &xa->xa_lock#7 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &sem->wait_lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &base->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sem->wait_lock irq_context: 0 sb_writers &rq->__lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &pl->lock key#11 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page) irq_context: 0 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &obj_hash[i].lock pool_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&journal->j_commit_timer) &p->pi_lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &journal->j_list_lock &c->lock irq_context: 0 &journal->j_list_lock pool_lock#2 irq_context: 0 rcu_read_lock &base->lock irq_context: 0 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &tags->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock irq_context: 0 
(wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 pgd_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 key irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 pcpu_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 percpu_counters_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pgd_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem key irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem percpu_counters_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem quarantine_lock irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &p->pi_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 &obj_hash[i].lock irq_context: 0 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#37 remove_cache_srcu &rq->__lock irq_context: 0 kn->active#37 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock quarantine_lock irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem 
fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &rq->__lock irq_context: 0 kn->active#40 fs_reclaim irq_context: 0 kn->active#40 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#41 fs_reclaim irq_context: 0 kn->active#41 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#41 &c->lock irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 fs_reclaim irq_context: 0 kn->active#42 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu quarantine_lock irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 kn->active#43 fs_reclaim irq_context: 0 kn->active#43 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#43 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock/1 irq_context: 0 &lo->lo_mutex irq_context: 0 &disk->open_mutex &lo->lo_mutex irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 &disk->open_mutex nbd_index_mutex irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &bdev->bd_size_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &q->queue_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 
&disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_node_0 irq_context: 0 &disk->open_mutex &nbd->config_lock &x->wait#3 irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock pool_lock#2 irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock &c->lock irq_context: 0 &disk->open_mutex &nbd->config_lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &meta->lock irq_context: 0 
&mousedev->mutex/1 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &rfkill->lock irq_context: 0 kn->active#15 &c->lock irq_context: 0 kn->active#16 &c->lock irq_context: 0 kn->active#22 &c->lock irq_context: 0 kn->active#22 &n->list_lock irq_context: 0 kn->active#22 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &c->lock irq_context: 0 kn->active#44 fs_reclaim irq_context: 0 kn->active#44 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#17 remove_cache_srcu irq_context: 0 kn->active#17 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#17 remove_cache_srcu &c->lock irq_context: 0 kn->active#17 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#17 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#15 &____s->seqcount irq_context: 0 kn->active#16 &n->list_lock irq_context: 0 kn->active#16 &n->list_lock &c->lock irq_context: 0 kn->active#17 &c->lock irq_context: 0 kn->active#21 &____s->seqcount irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &disk->open_mutex 
&new->lock irq_context: 0 &disk->open_mutex &new->lock &mtdblk->cache_mutex irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock irq_context: 0 kn->active#15 &n->list_lock irq_context: 0 kn->active#15 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock irq_context: 0 kn->active#45 fs_reclaim irq_context: 0 kn->active#45 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#45 &c->lock irq_context: 0 kn->active#45 &n->list_lock irq_context: 0 kn->active#45 &n->list_lock &c->lock irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mtd->master.chrdev_lock irq_context: 0 &mtd->master.chrdev_lock &mm->mmap_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &pcp->lock &zone->lock irq_context: 0 sb_writers &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &base->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu 
&n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss &base->lock irq_context: 0 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &mark->lock irq_context: 0 &group->inotify_data.idr_lock irq_context: 0 &group->inotify_data.idr_lock &obj_hash[i].lock irq_context: 0 &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 destroy_lock irq_context: 0 fs/notify/mark.c:89 irq_context: 0 (wq_completion)events_unbound connector_reaper_work irq_context: 0 (reaper_work).work irq_context: 0 (wq_completion)events_unbound connector_reaper_work destroy_lock irq_context: 0 &x->wait#10 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work irq_context: 0 (wq_completion)events_unbound (reaper_work).work destroy_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock 
irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &x->wait#3 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &x->wait#3 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &base->lock irq_context: 0 &mm->mmap_lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &rq->__lock irq_context: 0 &iint->mutex sb_writers#4 pgd_lock irq_context: 0 &iint->mutex sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 key irq_context: 0 &iint->mutex sb_writers#4 pcpu_lock irq_context: 0 &iint->mutex sb_writers#4 percpu_counters_lock irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events (debug_obj_work).work &meta->lock irq_context: 0 (wq_completion)events (debug_obj_work).work kfence_freelist_lock 
irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &iint->mutex &n->list_lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 userns_state_mutex irq_context: 0 &ei->xattr_sem &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &meta->lock irq_context: 0 &iint->mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &n->list_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex 
(inetaddr_chain).rwsem &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex _xmit_LOOPBACK irq_context: 0 rtnl_mutex netpoll_srcu irq_context: 0 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex &im->lock irq_context: 0 rtnl_mutex fib_info_lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex cbs_list_lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &idev->mc_lock irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex &ifa->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &tb->tb6_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback &dir->lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu 
quarantine_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET6 irq_context: 0 &iint->mutex &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &sb->s_type->i_lock_key#22 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#5 tomoyo_ss irq_context: 0 sb_writers#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &xattrs->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &mapping->i_mmap_rwsem irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#5 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &lruvec->lru_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &sb->s_type->i_lock_key#4 irq_context: 0 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET slock-AF_INET irq_context: 0 slock-AF_INET irq_context: 0 sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 slock-AF_INET6 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &table->hash[i].lock irq_context: 0 sk_lock-AF_INET &table->hash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_NETLINK &mm->mmap_lock irq_context: 0 sk_lock-AF_NETLINK fs_reclaim irq_context: 0 sk_lock-AF_NETLINK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NETLINK pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK &____s->seqcount irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_NETLINK &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK 
&c->lock irq_context: 0 sk_lock-AF_NETLINK pack_mutex irq_context: 0 sk_lock-AF_NETLINK batched_entropy_u32.lock irq_context: 0 sk_lock-AF_NETLINK text_mutex irq_context: 0 sk_lock-AF_NETLINK text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_NETLINK &fp->aux->used_maps_mutex irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#22 irq_context: 0 kn->active#46 fs_reclaim irq_context: 0 kn->active#46 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#46 &c->lock irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock cpufreq_driver_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock &ifa->lock irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq#2 irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex irq_context: 0 cb_lock &c->lock irq_context: 0 cb_lock rtnl_mutex irq_context: 0 cb_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sysctl_lock irq_context: 0 &f->f_pos_lock &zone->lock irq_context: 0 &f->f_pos_lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &c->lock irq_context: 0 cb_lock genl_mutex &____s->seqcount irq_context: 0 dev_addr_sem irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 cb_lock nlk_cb_mutex-GENERIC irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 cb_lock 
nlk_cb_mutex-GENERIC rlock-AF_NETLINK irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &obj_hash[i].lock irq_context: 0 cb_lock &____s->seqcount irq_context: softirq (&net->sctp.addr_wq_timer) irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 cb_lock &n->list_lock irq_context: 0 cb_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#3 tomoyo_ss quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->alloc_lock irq_context: 0 &pipe->rd_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 fs_reclaim irq_context: 0 sb_writers#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xattrs->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#7 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 sb_writers#5 lock#4 irq_context: 0 sb_writers#5 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 lock#5 irq_context: 0 sb_writers#5 &lruvec->lru_lock irq_context: 0 sb_writers#5 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &n->list_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 
rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx rcu_read_lock &sighand->signalfd_wqh irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 batched_entropy_u8.lock crngs.lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock crngs.lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex 
uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock pgd_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock key irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock pcpu_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock percpu_counters_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &pnettable->lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex irq_context: 0 rtnl_mutex napi_hash_lock irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 rtnl_mutex x25_neigh_list_lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &u->lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex _xmit_ETHER irq_context: 0 rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 rtnl_mutex _xmit_SLIP irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: softirq (&eql->timer) irq_context: softirq (&eql->timer) &eql->queue.lock irq_context: softirq (&eql->timer) &obj_hash[i].lock irq_context: softirq (&eql->timer) &base->lock irq_context: softirq (&eql->timer) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex quarantine_lock irq_context: 0 rtnl_mutex remove_cache_srcu irq_context: 0 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &vi->refill_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: softirq _xmit_ETHER#2 irq_context: 0 rtnl_mutex noop_qdisc.q.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock 
&rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &sem->wait_lock irq_context: 0 rtnl_mutex &rfkill->lock irq_context: 0 rtnl_mutex &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->mutex irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy0 irq_context: 0 (wq_completion)phy0 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy0 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock noop_qdisc.q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work 
rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &sch->q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex class irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex cbs_list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 (wq_completion)phy1 irq_context: 0 (wq_completion)phy1 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy1 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_VOID irq_context: 0 &u->iolock &u->lock irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock &____s->seqcount irq_context: 0 &u->iolock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &u->iolock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 
&iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &iint->mutex pgd_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex key irq_context: 0 &sig->cred_guard_mutex &iint->mutex pcpu_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex _xmit_X25 irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 rtnl_mutex lapb_list_lock pool_lock#2 irq_context: 0 rtnl_mutex lapb_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapbeth->up_lock irq_context: 0 rtnl_mutex &lapb->lock irq_context: 0 rtnl_mutex &lapb->lock pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem 
pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: softirq rcu_callback rcu_read_lock rt6_exception_lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 
sk_lock-AF_INET6 batched_entropy_u16.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 &iint->mutex &cfs_rq->removed.lock irq_context: softirq rcu_callback &ul->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 &tty->legacy_mutex tasklist_lock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock &tty->ctrl.lock irq_context: 0 &tty->ldisc_sem rcu_read_lock &tty->ctrl.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock key irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &tty->ctrl.lock irq_context: 0 tasklist_lock rcu_read_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex &tty->ctrl.lock irq_context: 0 &tty->legacy_mutex &f->f_lock irq_context: 0 &tty->legacy_mutex &f->f_lock fasync_lock irq_context: 0 &tty->legacy_mutex &obj_hash[i].lock irq_context: 0 &tty->legacy_mutex pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 rcu_read_lock &tty->ctrl.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 &port_lock_key irq_context: 0 &buf->lock irq_context: 0 &tty->ldisc_sem &port_lock_key irq_context: 0 &tty->ldisc_sem &port->lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem 
&tty->ldisc_sem &tty->flow.lock irq_context: 0 rtnl_mutex lapb_list_lock &c->lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &net->packet.sklist_lock irq_context: 0 sk_lock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &po->bind_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock ptype_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_node_0 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_PACKET &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock ptype_lock irq_context: 0 slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock irq_context: 0 sk_lock-AF_PACKET fs_reclaim irq_context: 0 sk_lock-AF_PACKET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PACKET pool_lock#2 irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET vmap_area_lock irq_context: 0 sk_lock-AF_PACKET &____s->seqcount irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_PACKET &c->lock irq_context: 0 sk_lock-AF_PACKET &n->list_lock irq_context: 0 sk_lock-AF_PACKET &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET pack_mutex irq_context: 0 sk_lock-AF_PACKET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_PACKET text_mutex irq_context: 0 sk_lock-AF_PACKET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_PACKET &fp->aux->used_maps_mutex irq_context: 0 rlock-AF_PACKET irq_context: 0 wlock-AF_PACKET irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem &tty->read_wait irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock (work_completion)(&buf->work) irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem 
&base->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &c->lock irq_context: softirq &(&idev->mc_dad_work)->timer irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock irq_context: softirq _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq _xmit_ETHER#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock pool_lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)dm_bufio_cache irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) dm_bufio_clients_lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_PACKET &rq->__lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 
rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex class irq_context: 0 rtnl_mutex (&tbl->proxy_timer) irq_context: softirq &(&idev->mc_ifc_work)->timer irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock &obj_hash[i].lock irq_context: softirq rcu_callback &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 percpu_counters_lock irq_context: softirq &(&ifa->dad_work)->timer irq_context: softirq 
&(&ifa->dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock 
irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock krc.lock irq_context: 0 rtnl_mutex &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rt6_exception_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock once_lock irq_context: softirq rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 
irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 fs_reclaim irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 sk_lock-AF_INET6 once_lock irq_context: 0 sk_lock-AF_INET6 once_lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu 
rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock remove_cache_srcu irq_context: 0 &f->f_pos_lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: softirq _xmit_ETHER#2 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->packet.sklist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock ptype_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 fanout_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[1] 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: softirq (&lapb->t1timer) irq_context: softirq (&lapb->t1timer) &lapb->lock irq_context: softirq (&lapb->t1timer) &lapb->lock batched_entropy_u8.lock irq_context: softirq (&lapb->t1timer) &lapb->lock kfence_freelist_lock irq_context: softirq (&lapb->t1timer) &lapb->lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &obj_hash[i].lock 
irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock &____s->seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq (&dev->watchdog_timer) irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock &obj_hash[i].lock irq_context: 0 &xa->xa_lock#7 pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &c->lock irq_context: softirq drivers/regulator/core.c:6262 irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (regulator_init_complete_work).work irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->list_lock irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->k_lock 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &dir->lock#2 irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock &f->f_lock irq_context: 0 sb_writers#5 &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 &c->lock irq_context: 0 hostname_poll.wait.lock irq_context: 0 &f->f_pos_lock uts_sem irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_PACKET pgd_lock irq_context: 0 sk_lock-AF_PACKET key irq_context: 0 sk_lock-AF_PACKET pcpu_lock irq_context: 0 sk_lock-AF_PACKET percpu_counters_lock irq_context: 0 sk_lock-AF_PACKET &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock 
rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock 
&obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rlock-AF_NETLINK irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock 
&ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &queue->rskq_lock irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &____s->seqcount irq_context: 0 sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock irq_context: 0 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET &sd->defer_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: softirq &sd->defer_lock irq_context: 0 &mm->mmap_lock &meta->lock irq_context: softirq 
rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET irq_context: softirq (&icsk->icsk_retransmit_timer) irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET irq_context: 0 sk_lock-AF_INET &meta->lock irq_context: 0 sk_lock-AF_INET kfence_freelist_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &dir->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 
&type->i_mutex_dir_key#3 &ei->i_data_sem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback uidhash_lock irq_context: softirq rcu_callback ucounts_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET elock-AF_INET irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount irq_context: 0 &u->iolock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 &pipe->wr_wait irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 
&n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock pool_lock#2 irq_context: 0 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->wr_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &____s->seqcount irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 quarantine_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &____s->seqcount irq_context: softirq &c->lock batched_entropy_u8.lock irq_context: softirq &c->lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET tk_core.seq.seqcount irq_context: 0 &vma->vm_lock->lock &c->lock irq_context: softirq slock-AF_INET tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq slock-AF_INET &obj_hash[i].lock irq_context: softirq slock-AF_INET &base->lock irq_context: softirq slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu irq_context: 0 sk_lock-AF_INET remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock 
rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &____s->seqcount irq_context: 0 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pgd_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: 0 sb_writers#7 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock pidmap_lock &n->list_lock irq_context: 0 lock pidmap_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock 
irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 tomoyo_ss irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 elock-AF_INET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 &pipe->mutex/1 pgd_lock irq_context: 0 &pipe->mutex/1 key irq_context: 0 &pipe->mutex/1 pcpu_lock irq_context: 0 &pipe->mutex/1 percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock percpu_counters_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rnp->exp_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 lock#5 irq_context: 0 &ret->b_state_lock &journal->j_list_lock key#15 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex kfence_freelist_lock irq_context: 0 kn->active#47 
fs_reclaim irq_context: 0 kn->active#47 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock khugepaged_mm_lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock &p->pi_lock irq_context: 0 lock#3 &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 &rq->__lock irq_context: 0 lock#3 (work_completion)(work) irq_context: 0 rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock irq_context: 0 &futex_queues[i].lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock irq_context: 0 rcu_read_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock &sighand->siglock &____s->seqcount irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &vma->vm_lock->lock batched_entropy_u8.lock irq_context: 0 &vma->vm_lock->lock kfence_freelist_lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 rcu_read_lock &sighand->siglock kfence_freelist_lock irq_context: 0 &ep->mtx &ep->lock &ep->wq irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &lock->wait_lock irq_context: 0 &ep->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#4 &c->lock irq_context: 0 &ep->mtx kn->active#4 fs_reclaim irq_context: 0 &ep->mtx kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx kn->active#4 pool_lock#2 irq_context: 0 &ep->mtx kn->active#4 &on->poll irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx rcu_read_lock &on->poll irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#4 &n->list_lock irq_context: 0 kn->active#4 &n->list_lock &c->lock irq_context: 0 &ep->mtx remove_cache_srcu irq_context: 0 &ep->mtx remove_cache_srcu quarantine_lock irq_context: 0 &ep->mtx remove_cache_srcu &c->lock irq_context: 0 &ep->mtx remove_cache_srcu &n->list_lock irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 &ep->mtx kn->active#4 &c->lock irq_context: 0 &f->f_pos_lock &p->lock module_mutex irq_context: 0 sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET once_mutex irq_context: 0 sk_lock-AF_INET once_mutex crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET 
rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &pipe->wr_wait irq_context: 0 rcu_read_lock tasklist_lock irq_context: 0 &ep->mtx rcu_read_lock &pipe->wr_wait irq_context: 0 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#9 irq_context: 0 &f->f_pos_lock sb_writers#9 &attr->mutex irq_context: 0 &f->f_pos_lock sb_writers#9 &attr->mutex &mm->mmap_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#3 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#3 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &type->s_umount_key#41/1 irq_context: 0 &type->s_umount_key#41/1 fs_reclaim irq_context: 0 &type->s_umount_key#41/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#41/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#41/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#41/1 sb_lock irq_context: 0 &type->s_umount_key#41/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem &c->lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 
&root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &c->lock irq_context: 0 &type->s_umount_key#41/1 &n->list_lock irq_context: 0 &type->s_umount_key#41/1 &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#41/1 &dentry->d_lock irq_context: 0 sb_writers#10 irq_context: 0 sb_writers#10 mount_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 
&type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq#2 irq_context: 0 kn->active#48 fs_reclaim irq_context: 0 kn->active#48 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#15 irq_context: 0 &f->f_pos_lock sb_writers#10 irq_context: 0 &f->f_pos_lock sb_writers#10 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#10 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 cgroup_mutex fs_reclaim irq_context: 0 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cgroup_mutex &n->list_lock &c->lock irq_context: 0 cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cgroup_mutex css_set_lock cgroup_file_kn_lock irq_context: 0 &type->s_umount_key#42/1 irq_context: 0 &type->s_umount_key#42/1 fs_reclaim irq_context: 0 &type->s_umount_key#42/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#42/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#42/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#42/1 &c->lock irq_context: 0 &type->s_umount_key#42/1 sb_lock irq_context: 0 &type->s_umount_key#42/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 
&type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#42/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 irq_context: 0 &type->s_umount_key#43 shrinker_rwsem irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock pool_lock#2 irq_context: 0 &type->s_umount_key#43 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#43 rename_lock.seqcount irq_context: 0 &type->s_umount_key#43 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#43 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#43 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#43 inode_hash_lock irq_context: 0 &type->s_umount_key#43 inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 pool_lock#2 irq_context: 0 &type->s_umount_key#43 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#43 sb_lock irq_context: 0 sb_lock &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &c->lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &____s->seqcount irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) cgroup_mutex irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) 
percpu_ref_switch_lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &cgrp->pidlist_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (wq_completion)cgroup_pidlist_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (work_completion)(&cgrp->release_agent_work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem 
&obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pcpu_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) 
cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 irq_context: 0 sb_writers#11 mount_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock key#2 irq_context: 0 cgroup_mutex cpuset_mutex irq_context: 0 cgroup_mutex cpuset_mutex callback_lock irq_context: 0 cgroup_mutex &dom->lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &c->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_mutex callback_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &dom->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq#2 irq_context: 0 kn->active#49 fs_reclaim irq_context: 0 kn->active#49 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#16 irq_context: 0 &f->f_pos_lock sb_writers#11 irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &obj_hash[i].lock irq_context: 0 kn->active#50 fs_reclaim irq_context: 0 kn->active#50 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#50 
&kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_mutex irq_context: 0 &f->f_pos_lock sb_writers#9 &mm->mmap_lock irq_context: 0 &type->s_umount_key#44 irq_context: 0 &type->s_umount_key#44 sb_lock irq_context: 0 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#26 irq_context: 0 &f->f_pos_lock sb_writers#12 irq_context: 0 &f->f_pos_lock sb_writers#12 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#12 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#12 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 sb_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 irq_context: 0 &f->f_pos_lock sb_writers#12 
&sb->s_type->i_mutex_key#17 &type->s_umount_key#44 sb_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 mnt_id_ida.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock mount_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#12 &sb->s_type->i_mutex_key#17 entries_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex irq_context: 0 rtnl_mutex dev_addr_sem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tn->lock irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx &sec->lock irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex dev_addr_sem pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem rlock-AF_NETLINK irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock irq_context: 0 rtnl_mutex dev_addr_sem &pn->hash_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem input_pool.lock irq_context: 0 rtnl_mutex _xmit_IEEE802154 irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex 
uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem 
&sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock nl_table_wait.lock irq_context: 0 nl_table_wait.lock &p->pi_lock irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &wb->list_lock irq_context: 0 &sbi->s_writepages_rwsem irq_context: 0 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle 
&journal->j_wait_updates irq_context: 0 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq &ei->i_completed_io_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ei->i_completed_io_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ext4__ioend_wq[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ret->b_uptodate_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_commit irq_context: 0 &journal->j_wait_done_commit &p->pi_lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex 
mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 batched_entropy_u8.lock crngs.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 init_mm.page_table_lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex percpu_ref_switch_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 swap_avail_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner 
&port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 proc_poll_wait.lock irq_context: 0 swap_slots_cache_enable_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up swap_slots_cache_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 swap_slots_cache_enable_mutex swap_lock irq_context: 0 &sighand->siglock rcu_read_lock &____s->seqcount#5 irq_context: 0 &sighand->siglock &prev->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&timer) irq_context: softirq (&timer) &obj_hash[i].lock irq_context: softirq (&timer) &base->lock irq_context: softirq (&timer) &base->lock &obj_hash[i].lock irq_context: softirq (&timer) rcu_read_lock pool_lock#2 irq_context: softirq (&timer) rcu_read_lock &c->lock irq_context: softirq (&timer) rcu_read_lock &____s->seqcount irq_context: softirq _xmit_ETHER#2 quarantine_lock irq_context: softirq (&timer) &txlock irq_context: softirq (&timer) &txlock &list->lock#3 irq_context: softirq (&timer) &txwq irq_context: softirq (&timer) &txwq &p->pi_lock irq_context: softirq (&timer) &txwq &p->pi_lock &rq->__lock irq_context: softirq (&timer) &txwq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &list->lock#5 irq_context: softirq &list->lock#5 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: 
&qdisc_tx_busylock _xmit_NETROM (console_sem).lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner &port_lock_key irq_context: softirq &rq_wait->wait irq_context: softirq &rq_wait->wait &p->pi_lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &rq_wait->wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 key#13 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock_bh _xmit_X25#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: softirq _xmit_ETHER#2 &meta->lock irq_context: softirq _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET quarantine_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu 
irq_context: 0 &vma->vm_lock->lock remove_cache_srcu quarantine_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &c->lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &n->list_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &obj_hash[i].lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &ep->mtx &mm->mmap_lock fs_reclaim irq_context: 0 &ep->mtx &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &vma->vm_lock->lock rcu_node_0 irq_context: 0 &journal->j_list_lock key#15 irq_context: 0 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock irq_context: 0 lock#3 rcu_read_lock (wq_completion)mm_percpu_wq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) irq_context: 0 lock#3 &x->wait#10 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 lock#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&type->s_umount_key#31 &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem 
jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 key#11 irq_context: softirq (&n->timer) irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) &n->lock &obj_hash[i].lock irq_context: softirq (&n->timer) &n->lock &base->lock irq_context: softirq (&n->timer) &n->lock &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq 
(&dom->period_timer) &p->sequence key#13 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) 
&journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/wireless/reg.c:236 irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem (&timer.timer) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock &x->wait#23 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#80 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem deferred_probe_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem device_links_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem 
remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fw_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock irq_context: 0 (wq_completion)events 
(work_completion)(&fw_work->work) rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: softirq net/wireless/reg.c:533 irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: hardirq|softirq &x->wait#12 &p->pi_lock &cfs_rq->removed.lock irq_context: hardirq|softirq &x->wait#12 &p->pi_lock &rq->__lock irq_context: hardirq|softirq &x->wait#12 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 tomoyo_ss &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 
&sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&cb->timer) &rq_wait->wait irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock &rq->__lock irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle 
rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &pl->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &pl->lock key#11 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 key#13 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &meta->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) kfence_freelist_lock irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_node_0 irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events reg_work rtnl_mutex &c->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: softirq 
&(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &zone->lock irq_context: 0 lock#3 rcu_read_lock &rq->__lock irq_context: 0 lock#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET batched_entropy_u8.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET kfence_freelist_lock irq_context: 0 sk_lock-AF_INET pgd_lock irq_context: 0 sk_lock-AF_INET key irq_context: 0 sk_lock-AF_INET pcpu_lock irq_context: 0 sk_lock-AF_INET percpu_counters_lock irq_context: 0 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &rq->__lock &base->lock irq_context: 0 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &rq->__lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work 
rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &rq->__lock &base->lock irq_context: softirq rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: softirq (&timer) rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&timer) rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&sk->sk_timer) irq_context: softirq (&sk->sk_timer) slock-AF_INET irq_context: softirq (&sk->sk_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&sk->sk_timer) slock-AF_INET &obj_hash[i].lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq (&wq_watchdog_timer) &obj_hash[i].lock irq_context: softirq (&wq_watchdog_timer) &base->lock irq_context: softirq (&wq_watchdog_timer) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 
&journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#5 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 irq_context: 0 &mm->mmap_lock &info->lock irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock mount_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &wb->list_lock irq_context: 0 &mm->mmap_lock &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sem->wait_lock irq_context: 0 sb_writers#4 &sem->wait_lock irq_context: 0 sb_writers#4 &p->pi_lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sbinfo->stat_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#5 tomoyo_ss &rq->__lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#5 mount_lock irq_context: 0 &mm->mmap_lock sb_writers#5 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 mount_lock irq_context: 0 &mm->mmap_lock sb_writers#4 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_writers#4 pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &mm->mmap_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &newf->file_lock &newf->resize_wait irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &kcov->lock irq_context: 0 &mm->mmap_lock &kcov->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &kcov->lock kcov_remote_lock irq_context: 0 &kcov->lock kcov_remote_lock pool_lock#2 irq_context: 0 pid_caches_mutex irq_context: 0 pid_caches_mutex slab_mutex irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pid_caches_mutex slab_mutex pool_lock#2 irq_context: 0 pid_caches_mutex slab_mutex &c->lock irq_context: 0 pid_caches_mutex slab_mutex &n->list_lock irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 pid_caches_mutex slab_mutex &k->list_lock irq_context: 0 pid_caches_mutex slab_mutex lock irq_context: 0 pid_caches_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#45 irq_context: 0 &type->s_umount_key#45 sb_lock irq_context: 0 &type->s_umount_key#45 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock hci_sk_list.lock irq_context: 0 misc_mtx &base->lock irq_context: 0 misc_mtx &base->lock &obj_hash[i].lock irq_context: 0 (work_completion)(&(&data->open_timeout)->work) irq_context: 0 &data->open_mutex irq_context: 0 &data->open_mutex fs_reclaim irq_context: 0 &data->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex pool_lock#2 irq_context: 0 
&data->open_mutex &____s->seqcount irq_context: 0 &data->open_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex &obj_hash[i].lock pool_lock irq_context: 0 &data->open_mutex &x->wait#9 irq_context: 0 &data->open_mutex hci_index_ida.xa_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &data->open_mutex wq_pool_mutex irq_context: 0 &data->open_mutex wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock irq_context: 0 &data->open_mutex pin_fs_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &data->open_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex irq_context: 0 &data->open_mutex gdp_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex gdp_mutex pool_lock#2 irq_context: 0 &data->open_mutex gdp_mutex lock irq_context: 0 &data->open_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex lock irq_context: 0 &data->open_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 bt_proto_lock &c->lock irq_context: 0 &data->open_mutex bus_type_sem irq_context: 0 &data->open_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex &c->lock irq_context: 0 &data->open_mutex &n->list_lock irq_context: 0 &data->open_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &dev->power.lock irq_context: 0 &data->open_mutex dpm_list_mtx irq_context: 0 &data->open_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex 
uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex uevent_sock_mutex &c->lock irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex subsys mutex#81 irq_context: 0 &data->open_mutex subsys mutex#81 &k->k_lock irq_context: 0 &data->open_mutex &dev->devres_lock irq_context: 0 &data->open_mutex triggers_list_lock irq_context: 0 &data->open_mutex leds_list_lock irq_context: 0 &data->open_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex &k->list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex bus_type_sem irq_context: 0 &data->open_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &data->open_mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &data->open_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#41 irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#41 &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex leds_list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex.wait_lock irq_context: 0 &data->open_mutex &p->pi_lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex &rq->__lock irq_context: 0 &data->open_mutex &rfkill->lock irq_context: 0 &data->open_mutex hci_dev_list_lock irq_context: 0 &data->open_mutex tk_core.seq.seqcount irq_context: 0 &data->open_mutex hci_sk_list.lock irq_context: 0 &data->open_mutex (pm_chain_head).rwsem irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#6 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock 
&pool->lock/1 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &data->open_mutex &list->lock#8 irq_context: 0 &data->open_mutex &data->read_wait irq_context: 0 &list->lock#8 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &p->alloc_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 hci_dev_list_lock irq_context: 0 (wq_completion)hci1 irq_context: 0 (wq_completion)hci0#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#8 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#6 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci1 
(work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &list->lock#6 irq_context: 0 &data->read_wait irq_context: softirq (&pool->mayday_timer) &pool->lock/1 irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock irq_context: softirq (&pool->mayday_timer) &obj_hash[i].lock irq_context: softirq (&pool->mayday_timer) &base->lock irq_context: softirq (&pool->mayday_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#8 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &list->lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &list->lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci1#2 
(work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &n->list_lock &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex 
rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci2 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#6 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci2#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#8 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 
(wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock kfence_freelist_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) pool_lock#2 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 &pool->lock/1 &x->wait#10 irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock irq_context: 0 &hdev->req_lock pool_lock#2 irq_context: 0 &hdev->req_lock &list->lock#7 irq_context: 0 &hdev->req_lock &list->lock#6 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->req_wait_q irq_context: 0 &hdev->req_lock &rq->__lock irq_context: 0 &hdev->req_lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &base->lock irq_context: 0 &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &hdev->req_lock (&timer.timer) irq_context: 0 &hdev->req_lock &____s->seqcount irq_context: 0 &hdev->req_lock rcu_read_lock pool_lock#2 
irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock 
rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#8 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci1#2 
(work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 &hdev->req_lock rcu_read_lock &rq->__lock irq_context: 0 &hdev->req_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#6 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 hci_dev_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_BLUETOOTH irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &list->lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci3#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#8 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 
(wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#8 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 
(wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &list->lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#19 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 namespace_sem mnt_id_ida.xa_lock pool_lock#2 irq_context: 0 rcu_read_lock &undo_list->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 
(wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) remove_cache_srcu irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &c->lock irq_context: 0 rtnl_mutex &nr_netdev_addr_lock_key irq_context: 0 rtnl_mutex listen_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK 
irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nl_table_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &n->list_lock irq_context: 0 pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 pernet_ops_rwsem uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &sem->wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 
(wq_completion)hci4 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#6 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#8 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex 
fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &list->lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 pernet_ops_rwsem devices_rwsem irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci4#2 
(work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 &hdev->req_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) 
&hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock 
&pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#8 irq_context: 0 (wq_completion)hci4#2 
(work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 namespace_sem &rq->__lock irq_context: 0 namespace_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci3#2 
(work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock 
uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock 
&hdev->unregister_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 
(wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount 
irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#8 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 
(wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#8 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 tomoyo_ss &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nl_table_lock nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem 
nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &data->open_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &data->open_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &data->open_mutex 
rfkill_global_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex.wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &data->open_mutex batched_entropy_u8.lock irq_context: 0 &data->open_mutex kfence_freelist_lock irq_context: 0 &data->open_mutex &meta->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)hci5 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#6 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#8 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount irq_context: 0 pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu irq_context: 0 pernet_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 
pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex 
nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock 
&rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &list->lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#3 &____s->seqcount#11 irq_context: 0 &f->f_pos_lock sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock irq_context: 0 &f->f_pos_lock sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock &____s->seqcount#11 irq_context: 0 misc_mtx &dir->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 rtnl_mutex &r->consumer_lock irq_context: 0 rtnl_mutex &r->consumer_lock &r->producer_lock irq_context: 0 rtnl_mutex failover_lock irq_context: 0 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &mm->mmap_lock irq_context: 0 rtnl_mutex &mm->mmap_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) 
&c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) 
&hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci5#2 
(work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#8 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex rcu_read_lock lock#8 irq_context: 0 rtnl_mutex rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &dir->lock#2 irq_context: 0 rtnl_mutex &ndev->lock pcpu_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock 
&tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &sem->wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 rtnl_mutex &br->hash_lock irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock &c->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex nf_hook_mutex irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex nf_hook_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex 
irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC kfence_freelist_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex j1939_netdev_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock 
&n->list_lock irq_context: 0 rtnl_mutex &br->hash_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 misc_mtx 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 rtnl_mutex key#16 irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock irq_context: softirq &(&bat_priv->nc.work)->timer irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#17 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#18 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) 
fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex kernfs_idr_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wq->mutex irq_context: 0 rtnl_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex init_lock irq_context: 0 rtnl_mutex init_lock slab_mutex irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock slab_mutex pool_lock#2 irq_context: 0 rtnl_mutex init_lock slab_mutex &c->lock irq_context: 0 rtnl_mutex init_lock slab_mutex &n->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex init_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex init_lock slab_mutex &k->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex init_lock slab_mutex &____s->seqcount irq_context: 0 rtnl_mutex init_lock slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex init_lock fs_reclaim irq_context: 0 rtnl_mutex init_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock &zone->lock irq_context: 0 rtnl_mutex init_lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock pool_lock#2 irq_context: 0 rtnl_mutex init_lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock &base->lock irq_context: 0 rtnl_mutex init_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock crngs.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex nf_hook_mutex &c->lock irq_context: 0 rtnl_mutex 
nf_hook_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_node_0 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex deferred_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex target_list_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &br->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &br->lock &br->hash_lock 
rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &pn->hash_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex deferred_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex 
&root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond0 irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: softirq &(&slave->notify_work)->timer irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->mcast.work)->timer irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->mcast.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex lweventlist_lock &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key irq_context: 0 rtnl_mutex team->team_lock_key fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key &c->lock irq_context: 0 rtnl_mutex team->team_lock_key &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 rtnl_mutex team->team_lock_key &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key &im->lock irq_context: 0 rtnl_mutex team->team_lock_key _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key lock irq_context: 0 rtnl_mutex team->team_lock_key lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key &rq->__lock irq_context: 0 (wq_completion)bond0#2 irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &meta->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key pool_lock#2 irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond0#3 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex crngs.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 rtnl_mutex ptype_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#2 netpoll_srcu irq_context: 0 rtnl_mutex 
team->team_lock_key#2 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#2 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#2 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->orig_work)->timer irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#2 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) key#19 irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: softirq &(&hdev->cmd_timer)->timer irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock 
&pool->lock/1 irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work irq_context: 0 (wq_completion)events_power_efficient (gc_work).work tk_core.seq.seqcount irq_context: 0 (wq_completion)events_power_efficient (gc_work).work "ratelimiter_table_lock" irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#2 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_NONE irq_context: 0 rtnl_mutex lock#9 irq_context: 0 tomoyo_ss rcu_node_0 irq_context: 0 (wq_completion)bond0#4 irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)events deferred_process_work &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#3 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#3 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 input_pool.lock irq_context: 
0 rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &rq->__lock irq_context: 0 (wq_completion)bond0#5 irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &hsr->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pin_fs_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock 
rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond0#6 irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#4 irq_context: 0 rtnl_mutex team->team_lock_key#4 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#4 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#4 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#4 &dir->lock#2 irq_context: 0 
rtnl_mutex team->team_lock_key#4 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#4 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 irq_context: 0 rtnl_mutex team->team_lock_key#5 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#5 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#5 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#5 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#5 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex 
team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &n->list_lock &c->lock irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex gdp_mutex lock irq_context: 0 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &k->k_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 irq_context: 0 rtnl_mutex team->team_lock_key#6 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#6 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#6 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#6 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#6 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 
&obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#6 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#5 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &rq->__lock irq_context: 0 rtnl_mutex rcu_node_0 irq_context: 0 rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 net_rwsem irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex team->team_lock_key#6 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 (console_sem).lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex mount_lock irq_context: 0 rtnl_mutex mount_lock mount_lock.seqcount irq_context: softirq (&app->join_timer) irq_context: softirq (&app->join_timer) &app->lock irq_context: softirq (&app->join_timer) &list->lock#11 irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock irq_context: softirq (&app->join_timer) &app->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer) &app->lock &base->lock irq_context: softirq (&app->join_timer) &app->lock &base->lock &obj_hash[i].lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq 
&(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &____s->seqcount irq_context: softirq (&app->join_timer)#2 irq_context: softirq (&app->join_timer)#2 &app->lock#2 irq_context: softirq (&app->join_timer)#2 &list->lock#12 irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock irq_context: softirq (&app->join_timer)#2 &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 &base->lock irq_context: softirq (&app->join_timer)#2 &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex uevent_sock_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key pool_lock#2 irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &xa->xa_lock#14 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &____s->seqcount irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/1 irq_context: 0 rtnl_mutex nf_hook_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex nf_hook_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex pgd_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex key irq_context: 0 rtnl_mutex rcu_state.exp_mutex pcpu_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex percpu_counters_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock pool_lock#2 irq_context: 0 rtnl_mutex req_lock irq_context: 0 rtnl_mutex &x->wait#11 irq_context: 0 rtnl_mutex subsys mutex#82 irq_context: 0 rtnl_mutex subsys mutex#82 &k->k_lock irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 kn->active#51 fs_reclaim irq_context: 0 kn->active#51 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim 
irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#52 fs_reclaim irq_context: 0 kn->active#52 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock nsim_bus_dev_ids.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &x->wait#9 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock bus_type_sem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock sysfs_symlink_target_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock dpm_list_mtx irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim 
irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex device_links_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fwnode_link_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
device_links_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key devlinks.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key &xa->xa_lock#15 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &xa->xa_lock#15 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: softirq (&tun->flow_gc_timer) irq_context: softirq (&tun->flow_gc_timer) &tun->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex 
rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_event_queue_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &(&fn_net->fib_chain)->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock (&timer.timer) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key stack_depot_init_mutex irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock nsim_bus_dev_list_lock.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 
&sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &xa->xa_lock#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex net_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &x->wait#9 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key rtnl_mutex gdp_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bus_type_sem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dpm_list_mtx 
irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex subsys mutex#17 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &dir->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_hotplug_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_base_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex input_pool.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &tbl->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex sysctl_lock irq_context: 0 &f->f_pos_lock 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex failover_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pnettable->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex smc_ib_devices.mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &vn->sock_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 
&f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&hwstats->traffic_dw)->timer irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&app->periodic_timer) irq_context: softirq (&app->periodic_timer) &app->lock irq_context: softirq (&app->periodic_timer) &app->lock &obj_hash[i].lock irq_context: softirq (&app->periodic_timer) &app->lock &base->lock irq_context: softirq (&app->periodic_timer) &app->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex probe_waitqueue.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock subsys mutex#83 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#52 &c->lock irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 cb_lock genl_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 devlinks.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &xa->xa_lock#15 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#2 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock nsim_bus_dev_list_lock.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex devnet_rename_sem irq_context: 0 rtnl_mutex devnet_rename_sem (console_sem).lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner irq_context: 0 rtnl_mutex 
devnet_rename_sem console_lock console_srcu console_owner &port_lock_key irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem &rq->__lock irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem &k->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem kernfs_rename_lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem kernfs_rename_lock irq_context: 0 rtnl_mutex devnet_rename_sem &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem &____s->seqcount irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex devnet_rename_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 rtnl_mutex &devlink_port->type_lock irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock krc.lock irq_context: 0 rtnl_mutex &nft_net->commit_mutex irq_context: 0 rtnl_mutex &ent->pde_unload_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &(&fn_net->fib_chain)->lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &tb->tb6_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 stack_depot_init_mutex irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &xa->xa_lock#3 irq_context: 0 &f->f_pos_lock sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex net_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &x->wait#9 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#2 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bus_type_sem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dpm_list_mtx irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex subsys mutex#17 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &dir->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_hotplug_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_base_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex input_pool.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &tbl->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex failover_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pnettable->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex smc_ib_devices.mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &vn->sock_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &handshake->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&wg->device_update_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &table->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->endpoint_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: softirq &(&conn->info_timer)->timer irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) &conn->chan_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#51 &c->lock irq_context: 0 kn->active#51 &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#2 &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 devlinks.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 
&xa->xa_lock#15 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex _xmit_SIT irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock 
&data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &(&fn_net->fib_chain)->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 stack_depot_init_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &xa->xa_lock#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex net_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &x->wait#9 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 
rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bus_type_sem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dpm_list_mtx irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex subsys mutex#17 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &dir->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex 
&dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_base_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex input_pool.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &tbl->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex failover_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pnettable->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex smc_ib_devices.mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &vn->sock_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 rtnl_mutex 
dev_addr_sem &br->lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &base->lock irq_context: 0 rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&brmctx->ip6_own_query.timer) irq_context: softirq (&brmctx->ip6_own_query.timer) &br->multicast_lock irq_context: softirq (&brmctx->ip4_own_query.timer) irq_context: softirq (&brmctx->ip4_own_query.timer) &br->multicast_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override 
&c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex _xmit_TUNNEL irq_context: 0 rtnl_mutex _xmit_IPGRE irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&in_dev->mr_ifc_timer) irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &ul->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#9 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#3 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#3 &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 devlinks.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &xa->xa_lock#15 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#4 pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 
batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex devnet_rename_sem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex devnet_rename_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &(&fn_net->fib_chain)->lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 stack_depot_init_mutex irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock 
rhashtable_bucket irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 
mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &xa->xa_lock#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &x->wait#9 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#4 rtnl_mutex gdp_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bus_type_sem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysfs_symlink_target_lock irq_context: softirq &(&br->gc_work)->timer irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dpm_list_mtx irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dir->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_base_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex input_pool.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex 
batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tbl->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex failover_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: softirq (&pool->mayday_timer) &pool->lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_inum_ida.xa_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pnettable->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex smc_ib_devices.mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &vn->sock_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex _xmit_TUNNEL6 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem batched_entropy_u8.lock irq_context: 0 rtnl_mutex devnet_rename_sem kfence_freelist_lock irq_context: 0 rtnl_mutex devnet_rename_sem &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 
rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override 
pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 devlinks.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &xa->xa_lock#15 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pin_fs_lock 
irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock kfence_freelist_lock irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#10 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bond->stats_lock/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &n->list_lock irq_context: 0 
rtnl_mutex dev_addr_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &(&fn_net->fib_chain)->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 stack_depot_init_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &xa->xa_lock#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#5 rtnl_mutex &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex net_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &x->wait#9 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bus_type_sem irq_context: 0 
&f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dpm_list_mtx irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex subsys mutex#17 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &dir->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_hotplug_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_base_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex input_pool.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 
&f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &tbl->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex failover_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &pnettable->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex smc_ib_devices.mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &vn->sock_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &rq->__lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex 
rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock deferred_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock (console_sem).lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock irq_context: softirq (&pmctx->ip6_own_query.timer) irq_context: softirq (&pmctx->ip6_own_query.timer) &br->multicast_lock irq_context: softirq (&pmctx->ip4_own_query.timer) irq_context: softirq (&pmctx->ip4_own_query.timer) &br->multicast_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &br->hash_lock irq_context: softirq rcu_read_lock &br->hash_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock nl_table_lock irq_context: softirq rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: softirq rcu_read_lock &br->multicast_lock irq_context: softirq rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &br->multicast_lock &dir->lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock deferred_lock irq_context: softirq rcu_read_lock &br->multicast_lock &c->lock irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: softirq rcu_read_lock &br->multicast_lock &base->lock irq_context: softirq rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: softirq rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock deferred_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &meta->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock pool_lock#2 
irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_IPGRE &c->lock irq_context: 0 rtnl_mutex _xmit_IPGRE &____s->seqcount irq_context: 0 cb_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 devlinks.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &xa->xa_lock#15 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#5 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#5 &devlink_port->type_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex _xmit_ETHER/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 batched_entropy_u32.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &(&fn_net->fib_chain)->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#6 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 stack_depot_init_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pin_fs_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &xa->xa_lock#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &base->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &devlink_port->type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex net_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#6 rtnl_mutex &x->wait#9 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex lock kernfs_idr_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bus_type_sem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dpm_list_mtx irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex irq_context: 0 
&f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex subsys mutex#17 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &dir->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_hotplug_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_base_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex input_pool.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 &f->f_pos_lock sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex failover_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pcpu_alloc_mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_subdir_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pnettable->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex smc_ib_devices.mutex irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &vn->sock_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&data->fib_event_work) &data->fib_lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &rq->__lock irq_context: softirq (&hsr->announce_timer) irq_context: softirq (&hsr->announce_timer) rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex &rq->__lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &hsr->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#6 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#6 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#6 &devlink_port->type_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &____s->seqcount irq_context: 0 &journal->j_list_lock &meta->lock irq_context: 0 &journal->j_list_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#3 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex 
&tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &hsr->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#6 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 &____s->seqcount irq_context: softirq (&hsr->prune_timer) irq_context: softirq (&hsr->prune_timer) &hsr->list_lock irq_context: softirq (&hsr->prune_timer) &obj_hash[i].lock irq_context: softirq (&hsr->prune_timer) &base->lock irq_context: softirq (&hsr->prune_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 rtnl_mutex &nn->netlink_tap_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex j1939_netdev_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &br->hash_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override 
&____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock key#16 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex pgd_lock irq_context: 0 rtnl_mutex key irq_context: 0 rtnl_mutex percpu_counters_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#5 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&bat_priv->tt.work)->timer irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#20 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.req_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.roam_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->tt.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) quarantine_lock irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_read_lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock kfence_freelist_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rq->__lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#4 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: softirq (&ndev->rs_timer) irq_context: softirq (&ndev->rs_timer) &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) pool_lock#2 irq_context: softirq (&ndev->rs_timer) &dir->lock#2 irq_context: softirq (&ndev->rs_timer) &ul->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock kfence_freelist_lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex lweventlist_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER pool_lock#2 irq_context: 
0 rtnl_mutex dev_addr_sem _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock irq_context: softirq rcu_read_lock &br->hash_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu 
console_owner console_owner_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: softirq rcu_read_lock &list->lock#13 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &list->lock#13 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) pool_lock#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem 
&ipvlan->addrs_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 
irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &meta->lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock 
&macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex key#21 irq_context: 0 rtnl_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &dir->lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex k-slock-AF_INET irq_context: 0 rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex rcu_read_lock 
console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu pgd_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu key irq_context: 0 &mm->mmap_lock remove_cache_srcu pcpu_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu percpu_counters_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &wg->device_update_lock &dir->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock &wg->socket_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &wg->socket_update_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock 
rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock &list->lock#14 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 
(work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rtnl_mutex pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex pool_lock#2 irq_context: 0 rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock 
&n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock 
rcu_read_lock k-slock-AF_INET &c->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 
(wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr 
+ (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: softirq &keypair->receiving_counter.lock irq_context: softirq &peer->keypairs.keypair_update_lock irq_context: softirq &list->lock#14 irq_context: softirq rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 
(wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: softirq rcu_read_lock_bh &base->lock irq_context: softirq rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 misc_mtx rfkill_global_mutex irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx remove_cache_srcu irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx remove_cache_srcu quarantine_lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx remove_cache_srcu &c->lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx remove_cache_srcu &n->list_lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx pool_lock#2 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &rfkill->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_radio_lock irq_context: 0 cb_lock genl_mutex &x->wait#9 irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2 
(work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 
(wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock 
irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; 
} while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex gdp_mutex irq_context: 0 cb_lock genl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex bus_type_sem irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex subsys mutex#56 irq_context: 0 cb_lock genl_mutex subsys mutex#56 &k->k_lock irq_context: 0 cb_lock genl_mutex device_links_lock irq_context: 0 cb_lock genl_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex deferred_probe_mutex irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex wq_pool_mutex irq_context: 0 cb_lock genl_mutex wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex crngs.lock irq_context: 0 cb_lock genl_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 
cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#57 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#57 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx 
&sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex 
rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#41 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#41 &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex.wait_lock irq_context: 0 cb_lock genl_mutex pin_fs_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#21 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tn->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx failover_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx 
proc_inum_ida.xa_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pnettable->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx smc_ib_devices.mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ndev->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock genl_mutex (inetaddr_chain).rwsem irq_context: 0 cb_lock genl_mutex inet6addr_chain.lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 cb_lock rcu_read_lock &c->lock irq_context: 0 cb_lock &pcp->lock &zone->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rdev->mgmt_registrations_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 
&sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) 
&hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&dwork->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->color_collision_detect_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 cb_lock rtnl_mutex.wait_lock irq_context: 0 cb_lock &p->pi_lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx rtnl_mutex.wait_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)phy3 irq_context: 0 (wq_completion)phy3 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy3 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &list->lock#15 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner irq_context: 0 
(wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &wdev->event_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &c->lock irq_context: softirq (&ndev->rs_timer) &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)cfg80211 
(work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_wait.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &list->lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events wireless_nlevent_work irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem pool_lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 
rtnl_mutex dev_addr_sem quarantine_lock irq_context: 0 (wq_completion)phy4 irq_context: 0 (wq_completion)phy4 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy4 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &c->lock irq_context: softirq rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: softirq rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: softirq &list->lock#16 irq_context: softirq rcu_read_lock lock#6 irq_context: softirq rcu_read_lock lock#6 kcov_remote_lock irq_context: softirq rcu_read_lock &local->rx_path_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &list->lock#15 irq_context: softirq rcu_read_lock &local->rx_path_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 kcov_remote_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#46/1 irq_context: 0 &type->s_umount_key#46/1 fs_reclaim irq_context: 0 &type->s_umount_key#46/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#46/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#46/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#46/1 sb_lock irq_context: 0 &type->s_umount_key#46/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#46/1 &c->lock irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount irq_context: 0 &type->s_umount_key#46/1 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#46/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex binderfs_minors.xa_lock irq_context: 0 &type->s_umount_key#46/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim irq_context: 0 
&type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock &wq irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock iunique_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &c->lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex css_set_lock 
irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_file_kn_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex task_group_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#6 irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq irq_context: 0 kn->active#53 fs_reclaim irq_context: 0 kn->active#53 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#54 fs_reclaim irq_context: 0 kn->active#54 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem 
cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem 
&s->s_inode_list_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &fsnotify_mark_srcu irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex 
cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex css_set_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex callback_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#11 
&type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex percpu_counters_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#7 irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 kn->active#55 fs_reclaim irq_context: 0 kn->active#55 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->alloc_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->alloc_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex cpuset_attach_wq.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem inode_hash_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock 
&memcg->mm_list.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock pool_lock#2 irq_context: 0 rtnl_mutex &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock krc.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &____s->seqcount#2 irq_context: 0 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#13 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#13 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#13 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#13 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 kn->active#56 &rq->__lock irq_context: 0 kn->active#56 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#56 fs_reclaim irq_context: 0 kn->active#56 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#56 stock_lock irq_context: 0 kn->active#56 &c->lock irq_context: 0 kn->active#56 pool_lock#2 irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount#2 irq_context: 0 kn->active#57 fs_reclaim irq_context: 0 kn->active#57 fs_reclaim &rq->__lock irq_context: 0 kn->active#57 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#57 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#57 stock_lock irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#57 memcg_max_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#13 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#13 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex devcgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &____s->seqcount#2 irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: softirq rcu_callback stock_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex 
rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#13 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#13 pool_lock#2 irq_context: 0 
&type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 kn->active#55 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock rcu_read_lock &sighand->siglock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex 
cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &newf->file_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 &xa->xa_lock#13 &c->lock irq_context: 0 &xa->xa_lock#13 &____s->seqcount#2 irq_context: 0 &xa->xa_lock#13 &____s->seqcount irq_context: 0 &xa->xa_lock#13 pool_lock#2 irq_context: 0 nf_hook_mutex irq_context: 0 nf_hook_mutex fs_reclaim irq_context: 0 nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_hook_mutex stock_lock irq_context: 0 nf_hook_mutex &c->lock irq_context: 0 nf_hook_mutex &n->list_lock irq_context: 0 nf_hook_mutex &n->list_lock &c->lock irq_context: 0 nf_hook_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 rtnl_mutex _xmit_ETHER &____s->seqcount#2 irq_context: 0 nf_hook_mutex &____s->seqcount#2 irq_context: 0 nf_hook_mutex &____s->seqcount irq_context: 0 ebt_mutex &mm->mmap_lock irq_context: 0 ebt_mutex &c->lock irq_context: 0 ebt_mutex &____s->seqcount#2 irq_context: 0 ebt_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock stock_lock irq_context: 0 &xt[i].mutex &c->lock irq_context: 0 &xt[i].mutex &n->list_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &xt[i].mutex vmap_area_lock irq_context: 0 &xt[i].mutex &____s->seqcount irq_context: 0 &xt[i].mutex &per_cpu(xt_recseq, i) irq_context: 0 &xt[i].mutex &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock irq_context: 0 &xt[i].mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) quarantine_lock irq_context: 0 nf_nat_proto_mutex irq_context: 0 nf_nat_proto_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex pool_lock#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex nf_hook_mutex stock_lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 nf_nat_proto_mutex cpu_hotplug_lock irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock irq_context: 0 nf_nat_proto_mutex stock_lock irq_context: 0 nf_hook_mutex rcu_read_lock pool_lock#2 irq_context: 0 nf_hook_mutex &obj_hash[i].lock irq_context: 0 &xt[i].mutex &rq->__lock irq_context: 0 &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex &c->lock irq_context: 0 nf_nat_proto_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex &____s->seqcount irq_context: 0 &xt[i].mutex &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 elock-AF_INET6 irq_context: 0 &pipe->mutex/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#13 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#13 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle stock_lock irq_context: 0 loop_validate_mutex irq_context: 0 loop_validate_mutex &lo->lo_mutex irq_context: 0 &fsnotify_mark_srcu fs_reclaim irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 &xt[i].mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 purge_vmap_area_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem stock_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss 
&____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex &____s->seqcount#2 irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 &vma->vm_lock->lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#13 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#13 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 &r->consumer_lock irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock stock_lock irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#8 tomoyo_ss pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &____s->seqcount#2 irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount#2 irq_context: 0 &group->notification_waitq &ep->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock 
&pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &net->xdp.lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 stock_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &n->list_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &____s->seqcount irq_context: 0 crypto_cfg_mutex irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock 
&nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh key#21 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 
(work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xdp.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->map_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_XDP irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &msk->pm.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) irq_context: 0 &sighand->siglock &p->pi_lock irq_context: 0 tasklist_lock &p->alloc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 
rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock irq_context: 0 pcpu_lock stock_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#4 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock 
&handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 tasklist_lock 
stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &s->s_inode_list_lock irq_context: 0 sb_writers#4 sb_internal irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 sb_internal &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &____s->seqcount irq_context: 0 sb_writers#4 sb_internal rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 inode_hash_lock irq_context: 0 sb_writers#4 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal 
mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &fsnotify_mark_srcu &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#2 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &p->lock &____s->seqcount#2 irq_context: 0 sb_writers#8 &xattrs->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &xt[i].mutex init_mm.page_table_lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex &wg->device_update_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#3 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) 
&wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 
irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex genl_mutex.wait_lock irq_context: softirq (&peer->timer_persistent_keepalive) irq_context: softirq (&peer->timer_persistent_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) &list->lock#14 
irq_context: softirq (&peer->timer_persistent_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &lock->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER batched_entropy_u8.lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &____s->seqcount#2 irq_context: 0 
cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex.wait_lock irq_context: 0 cb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &ndev->lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)phy5 irq_context: 0 (wq_completion)phy5 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy5 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#5 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock 
&handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 
(wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sighand->siglock stock_lock irq_context: 0 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#5 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock 
irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock irq_context: 0 
(wq_completion)wg-kex-wg0#6 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount 
irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) &handshake->lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#6 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 
(wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_owner irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &xt[i].mutex &n->list_lock &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock &p->alloc_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&wdev->mtx &rdev->bss_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount irq_context: 0 sb_writers#4 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount#2 irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#5 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy6 irq_context: 0 (wq_completion)phy6 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy6 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 
cb_lock genl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 
(work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh icmp_global.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh icmp_global.lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh 
k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: softirq (&peer->timer_persistent_keepalive) &c->lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &xa->xa_lock#13 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &xa->xa_lock#13 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sighand->siglock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events &rq->__lock irq_context: 0 (wq_completion)bat_events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 
cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 (kmod_concurrent_max).lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &x->wait#17 irq_context: 0 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount irq_context: 0 kn->active#53 &c->lock irq_context: 0 kn->active#53 &____s->seqcount#2 irq_context: 0 kn->active#53 &n->list_lock irq_context: 0 kn->active#53 &n->list_lock &c->lock irq_context: 0 kn->active#54 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &p->pi_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)phy7 irq_context: 0 (wq_completion)phy7 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy7 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 pgd_lock irq_context: 0 sk_lock-AF_INET6 key irq_context: 0 sk_lock-AF_INET6 pcpu_lock irq_context: 0 sk_lock-AF_INET6 percpu_counters_lock irq_context: 0 sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 running_helpers_waitq.lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &n->list_lock irq_context: softirq (&ndev->rs_timer) &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) 
&hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock stock_lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock stock_lock irq_context: 0 tasklist_lock &sighand->siglock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#55 &c->lock irq_context: 0 kn->active#55 &____s->seqcount#2 irq_context: 0 kn->active#55 &n->list_lock irq_context: 0 kn->active#55 &n->list_lock &c->lock irq_context: 0 pcpu_alloc_mutex fs_reclaim irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pcpu_alloc_mutex &____s->seqcount irq_context: 0 sb_writers#4 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &mapping->private_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 sb_writers#4 lock#4 irq_context: 0 sb_writers#4 lock#5 irq_context: 0 sb_writers#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: 0 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 
sb_internal jbd2_handle &(ei->i_block_reservation_lock) key#14 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 integrity_iint_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#17 irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#18 irq_context: softirq &list->lock#18 irq_context: softirq rcu_read_lock x25_neigh_list_lock irq_context: softirq rcu_read_lock &list->lock#19 irq_context: softirq rcu_read_lock x25_list_lock irq_context: softirq rcu_read_lock x25_forward_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 
(wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 sb_writers#8 kn->active#5 &c->lock irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#9 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 
(wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 
0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#8 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &iint->mutex &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned 
long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 
(wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &meta->lock irq_context: 0 &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock &ul->lock irq_context: 0 sb_writers#3 tomoyo_ss &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 
(work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#8 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 
(wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 link_idr_lock irq_context: 0 (wq_completion)phy8 irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx quarantine_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &lock->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 rtnl_mutex net_rwsem nl_table_lock irq_context: 0 rtnl_mutex net_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#10 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 
0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 
irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#10 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long 
__ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: softirq rcu_read_lock &ifibss->incomplete_lock irq_context: softirq rcu_read_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->rx_path_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount#2 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 
sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock 
irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_nat_proto_mutex &n->list_lock irq_context: 0 nf_nat_proto_mutex &n->list_lock &c->lock irq_context: 0 nf_nat_proto_mutex &rq->__lock irq_context: 0 nf_nat_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&w->w) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 &xt[i].mutex &lock->wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock irq_context: softirq rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)phy9 irq_context: 0 (wq_completion)phy9 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy9 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)phy10 irq_context: 0 (wq_completion)phy10 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy10 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner_lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &c->lock irq_context: 0 sk_lock-AF_INET6 crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock 
&rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &asoc->wait irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 __ip_vs_mutex irq_context: 0 __ip_vs_mutex &rq->__lock irq_context: 0 __ip_vs_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 kn->active#5 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &obj_hash[i].lock irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex (console_sem).lock irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner_lock irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_owner.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex batched_entropy_u8.lock irq_context: 0 &xt[i].mutex kfence_freelist_lock irq_context: 0 &xt[i].mutex &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)phy11 irq_context: 0 (wq_completion)phy11 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy11 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &n->list_lock 
irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)phy12 irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock console_owner irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount#2 irq_context: 0 
&type->s_umount_key#46/1 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 &c->lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock 
fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#57 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock 
&local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex &p->pi_lock irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &obj_hash[i].lock pool_lock irq_context: 0 nf_nat_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 nf_nat_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 
(wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 
0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 nf_hook_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override 
pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#7 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount#2 irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#55 &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 
(wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) 
batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &xt[i].mutex 
rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu irq_context: 0 &xt[i].mutex remove_cache_srcu quarantine_lock irq_context: 0 &xt[i].mutex remove_cache_srcu &c->lock irq_context: 0 &xt[i].mutex remove_cache_srcu &n->list_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &xt[i].mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
&peer->endpoint_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#11 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 
irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#12 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex kfence_freelist_lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &n->list_lock &c->lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx 
&sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock &c->lock irq_context: 0 (wq_completion)phy13 irq_context: 0 (wq_completion)phy13 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy13 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &meta->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &lock->wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &____s->seqcount 
irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &pnsocks.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 resource_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PHONET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#20 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 clock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 (wq_completion)phy14 irq_context: 0 (wq_completion)phy14 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy14 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &group->mark_mutex &____s->seqcount#2 irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#6 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#6 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 kn->active#56 &____s->seqcount#2 irq_context: 0 kn->active#56 &n->list_lock irq_context: 0 kn->active#56 &n->list_lock &c->lock irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ebt_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ebt_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_nat_proto_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 nf_nat_proto_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &dentry->d_lock irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
integrity_iint_lock irq_context: 0 sb_writers#4 &iint->mutex irq_context: 0 sb_writers#4 &ei->xattr_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem 
remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock 
rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &ei->i_data_sem &____s->seqcount irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &folio_wait_table[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock key#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 quarantine_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 sb_internal jbd2_handle 
&ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_internal &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle pgd_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle key irq_context: 0 sb_writers#4 sb_internal jbd2_handle pcpu_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle percpu_counters_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 &group->mark_mutex &rq->__lock irq_context: 0 &group->mark_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->dat.work)->timer irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->bla.work)->timer irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 
irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &hash->list_locks[i] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) key#21 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &entry->crc_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 &obj_hash[i].lock irq_context: 0 ebt_mutex &rq->__lock irq_context: 0 ebt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock 
rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex quarantine_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh key#21 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_sockopt_mutex &rq->__lock irq_context: 0 nf_sockopt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
&(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock &c->lock irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle key#4 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex qdisc_mod_lock irq_context: 0 rtnl_mutex &block->lock irq_context: 0 rtnl_mutex &block->cb_lock irq_context: 0 rtnl_mutex &block->cb_lock &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &sch->q.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock &p->pi_lock irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock &p->pi_lock &rq->__lock irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &p->pi_lock irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock/1 wq_mayday_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex stock_lock irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults irq_context: 0 &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 l2tp_ip_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 &n->list_lock irq_context: 0 sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock 
irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock key#22 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &c->lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_NONE#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &meta->lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 krc.lock irq_context: 0 sk_lock-AF_INET6 krc.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem 
jbd2_handle lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem 
jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback key#22 irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &list->lock#21 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 sk_lock-AF_INET6 
&mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &mapping->private_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 (wq_completion)events (work_completion)(&aux->work) stock_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sbi->s_writepages_rwsem 
&mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &rq->__lock irq_context: 0 sb_writers#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#21 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rlock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) 
rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 &fsnotify_mark_srcu rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &n->list_lock irq_context: 0 &sighand->siglock &n->list_lock &c->lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 
sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 nfnl_subsys_ipset fs_reclaim irq_context: 0 nfnl_subsys_ipset fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ipset &c->lock irq_context: 0 nfnl_subsys_ipset pool_lock#2 irq_context: 0 nfnl_subsys_ipset &obj_hash[i].lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &group->mark_mutex &n->list_lock irq_context: 0 &group->mark_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#6 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &q->instances_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_node_0 irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &____s->seqcount#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock 
&mm->mmap_lock/1 &anon_vma->rwsem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 &u->iolock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &meta->lock irq_context: 0 sb_writers#4 kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_internal kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] irq_context: 0 &mm->mmap_lock 
rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &rq->__lock irq_context: 0 &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock kfence_freelist_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock key irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &c->lock irq_context: 0 sb_writers#4 sb_internal rcu_node_0 irq_context: 0 sb_writers#4 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex freezer_mutex.wait_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock 
&c->lock irq_context: softirq (&peer->timer_persistent_keepalive) &n->list_lock irq_context: softirq (&peer->timer_persistent_keepalive) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 lock pidmap_lock &____s->seqcount#2 irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock pidmap_lock &____s->seqcount irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &____s->seqcount#2 irq_context: 0 &sighand->siglock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rlock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &list->lock#20 
irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 clock-AF_INET irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &c->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 delayed_uprobe_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock quarantine_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock 
&local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&peer->timer_retransmit_handshake) irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_node_0 irq_context: 0 cb_lock rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem key#14 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu quarantine_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &c->lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 
&sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 purge_vmap_area_lock &meta->lock irq_context: 0 purge_vmap_area_lock kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 lock link_idr_lock irq_context: 0 lock link_idr_lock pool_lock#2 irq_context: 0 tracepoints_mutex irq_context: 0 tracepoints_mutex &rq->__lock irq_context: 0 tracepoints_mutex fs_reclaim irq_context: 0 tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tracepoints_mutex pool_lock#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex text_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &group->notification_waitq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock 
jump_label_mutex text_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 jbd2_handle irq_context: 0 &journal->j_wait_reserved irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_wait_transaction_locked irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier irq_context: 0 &journal->j_barrier &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_state_lock irq_context: 0 &journal->j_barrier jbd2_handle irq_context: 0 &journal->j_barrier &journal->j_wait_commit irq_context: 0 &journal->j_barrier &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_wait_done_commit irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &c->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override 
&c->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &dd->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex 
&obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &dd->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &____s->seqcount irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock pool_lock#2 irq_context: 0 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 quarantine_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) batched_entropy_u8.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock 
irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex (&timer.timer) irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &ei->i_es_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &ei->i_es_lock key#2 irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &mapping->private_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &meta->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex kfence_freelist_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 &lruvec->lru_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#5 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &lruvec->lru_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex 
&sb->s_type->i_lock_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock &c->lock irq_context: 0 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle remove_cache_srcu irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 link_idr_lock &obj_hash[i].lock irq_context: 0 link_idr_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex 
irq_context: 0 &net->xfrm.xfrm_cfg_mutex (console_sem).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock irq_context: softirq (&sdp->delay_work) irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock 
pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rlock-AF_NETLINK irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &ep->mtx wakeup_ida.xa_lock irq_context: 0 &ep->mtx &x->wait#9 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &ep->mtx &k->list_lock irq_context: 0 &ep->mtx gdp_mutex irq_context: 0 &ep->mtx gdp_mutex &k->list_lock irq_context: 0 &ep->mtx gdp_mutex fs_reclaim irq_context: 0 &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx gdp_mutex pool_lock#2 irq_context: 0 &ep->mtx gdp_mutex &c->lock irq_context: 0 &ep->mtx gdp_mutex lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &ep->mtx lock irq_context: 0 &ep->mtx lock kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem irq_context: 0 &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &ep->mtx bus_type_sem irq_context: 0 &ep->mtx sysfs_symlink_target_lock irq_context: 0 &ep->mtx uevent_sock_mutex irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &ep->mtx uevent_sock_mutex remove_cache_srcu irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 &ep->mtx uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 &ep->mtx uevent_sock_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &ep->mtx uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock 
&rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 &ep->mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &ep->mtx subsys mutex#15 irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock irq_context: 0 &ep->mtx events_lock irq_context: 0 &ep->mtx &dentry->d_lock irq_context: 0 &ep->mtx rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx &n->list_lock irq_context: 0 &ep->mtx &n->list_lock &c->lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 &ep->mtx &____s->seqcount#2 irq_context: 0 &ep->mtx per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &ep->mtx stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &ep->mtx &u->lock irq_context: 0 &ep->mtx &u->lock &u->peer_wait irq_context: 0 base_sockets.lock irq_context: 0 sb_writers#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &ep->mtx &ep->lock &ws->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock 
sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &sctp_ep_hashtable[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_read_lock 
rcu_read_lock k-slock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &lock->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_wait_transaction_locked irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex batched_entropy_u8.lock irq_context: softirq &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock 
tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#8 &rq->__lock irq_context: 0 sb_writers#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu pool_lock#2 irq_context: 0 tracepoints_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &u->lock &ei->socket.wq.wait &ep->lock irq_context: 0 &ep->mtx &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx wakeup_srcu irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &ep->mtx &x->wait#3 irq_context: 0 &ep->mtx (&ws->timer) irq_context: 0 &ep->mtx &base->lock irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &ep->mtx &root->kernfs_rwsem pool_lock#2 
irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &ep->mtx deferred_probe_mutex irq_context: 0 &ep->mtx device_links_lock irq_context: 0 &ep->mtx mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &ws->lock irq_context: 0 &ep->mtx &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ws->lock &obj_hash[i].lock irq_context: 0 &ep->mtx deleted_ws.lock irq_context: 0 events_lock irq_context: 0 wakeup_srcu irq_context: 0 wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&ws->timer) irq_context: 0 subsys mutex#15 irq_context: 0 subsys mutex#15 &k->k_lock irq_context: 0 subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 uevent_sock_mutex pool_lock#2 irq_context: 0 uevent_sock_mutex nl_table_lock irq_context: 0 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 uevent_sock_mutex nl_table_wait.lock irq_context: 0 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 gdp_mutex sysfs_symlink_target_lock irq_context: 0 gdp_mutex &obj_hash[i].lock irq_context: 0 &ws->lock irq_context: 0 deleted_ws.lock irq_context: 0 wakeup_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 base_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_ISDN irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 event_mutex irq_context: 0 event_mutex sched_register_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex pool_lock#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex tracepoints_mutex irq_context: 0 event_mutex tracepoints_mutex fs_reclaim irq_context: 0 event_mutex tracepoints_mutex fs_reclaim &rq->__lock irq_context: 0 event_mutex tracepoints_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 event_mutex tracepoints_mutex pool_lock#2 irq_context: 0 event_mutex tracepoints_mutex &rq->__lock irq_context: 0 event_mutex tracepoints_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex &rq->__lock irq_context: 0 event_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock 
fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_wait_transaction_locked irq_context: 0 tracepoints_mutex fs_reclaim &rq->__lock irq_context: 0 tracepoints_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &obj_hash[i].lock irq_context: 0 &iint->mutex ima_extend_list_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: softirq (&peer->timer_send_keepalive) irq_context: softirq (&peer->timer_send_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_send_keepalive) &list->lock#14 irq_context: softirq (&peer->timer_send_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_node_0 irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_NETLINK &rq->__lock irq_context: 0 sk_lock-AF_NETLINK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &bgl->locks[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu pool_lock#2 irq_context: 0 
&ep->mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 lock prog_idr_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &q->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock link_idr_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &ep->mtx kernfs_idr_lock irq_context: 0 gdp_mutex kernfs_idr_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_node_0 irq_context: 0 &xt[i].mutex remove_cache_srcu pool_lock#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock &q->lock &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &xt[i].mutex rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_devs_lock irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 rtnl_mutex &ul->lock irq_context: 0 rtnl_mutex &net->xdp.lock irq_context: 0 rtnl_mutex mirred_list_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &idev->mc_query_lock irq_context: 0 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 rtnl_mutex &idev->mc_report_lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock irq_context: 0 rtnl_mutex &pnn->routes.lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 rtnl_mutex deferred_probe_mutex irq_context: 0 rtnl_mutex device_links_lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: hardirq rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock pool_lock#2 
irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dev_base_lock irq_context: 0 lweventlist_lock irq_context: 0 krc.lock irq_context: 0 &dir->lock#2 irq_context: 0 &dir->lock#2 &obj_hash[i].lock irq_context: 0 &dir->lock#2 pool_lock#2 irq_context: 0 netdev_unregistering_wq.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work 
fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock krc.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock 
irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex &tbl->lock krc.lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: softirq rcu_callback &c->lock irq_context: softirq rcu_callback &n->list_lock irq_context: softirq rcu_callback &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu irq_context: 0 &group->mark_mutex remove_cache_srcu quarantine_lock irq_context: 0 &group->mark_mutex remove_cache_srcu &c->lock irq_context: 0 &group->mark_mutex remove_cache_srcu &n->list_lock irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_LOOPBACK#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex 
rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &base->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex __ip_vs_mutex irq_context: 0 rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 rtnl_mutex flowtable_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem stock_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
rtnl_mutex &ndev->lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock irq_context: 0 lweventlist_lock pool_lock#2 irq_context: 0 lweventlist_lock &dir->lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_sockopt_mutex nf_sockopt_mutex.wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 nf_sockopt_mutex.wait_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: 0 lock link_idr_lock &____s->seqcount#2 irq_context: 0 lock link_idr_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_read_lock 
rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock 
&mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &obj_hash[i].lock pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex (kmod_concurrent_max).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &x->wait#17 irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex 
&rnp->exp_wq[0] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_barrier irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sem->waiters irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rsp->gp_wait &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rsp->gp_wait pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 tcpv6_prot_mutex irq_context: 0 sk_lock-AF_INET6 device_spinlock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &obj_hash[i].lock irq_context: softirq rcu_callback &rsp->gp_wait irq_context: 0 rtnl_mutex rcu_read_lock &br->hash_lock irq_context: 0 rtnl_mutex rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex rcu_read_lock &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &br->hash_lock &c->lock irq_context: 0 cb_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex fs_reclaim &rq->__lock irq_context: 0 &iint->mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 &pipe->mutex/1 slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 free_vmap_area_lock irq_context: 0 &pipe->mutex/1 vmap_area_lock irq_context: 0 &pipe->mutex/1 init_mm.page_table_lock irq_context: 0 &pipe->mutex/1 &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#7 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem kthread_create_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &x->wait irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &x->wait#21 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (&timer.timer) irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock &obj_hash[i].lock 
irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock nl_table_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem fs_reclaim irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem pool_lock#2 irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &c->lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem kthread_create_lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &rq->__lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &x->wait irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback stock_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &x->wait#21 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock 
&type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle key#4 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_node_0 irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 krc.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&p->forward_delay_timer) irq_context: softirq (&p->forward_delay_timer) &br->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock pool_lock#2 irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 rtnl_mutex &br->lock deferred_lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock (console_sem).lock irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 
0 rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &br->lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &dir->lock#2 irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock deferred_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &br->multicast_lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->rexmit_timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) krc.lock irq_context: softirq (&mp->timer) irq_context: softirq (&mp->timer) &br->multicast_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&mp->timer) irq_context: 0 cb_lock genl_mutex 
rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex (&pmctx->ip6_mc_router_timer) irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (&pmctx->ip4_mc_router_timer) irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock stock_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock pool_lock#2 irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 rtnl_mutex &ht->mutex irq_context: 0 rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 rtnl_mutex &br->multicast_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &xa->xa_lock#3 &c->lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 &____s->seqcount#2 irq_context: 0 rtnl_mutex &xa->xa_lock#3 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &rnp->exp_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex &rnp->exp_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex rcu_state.exp_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock 
&cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 sb_writers#5 rcu_node_0 irq_context: 0 cb_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock rcu_read_lock &____s->seqcount irq_context: 0 cb_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex wq_mayday_lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock irq_context: 0 rtnl_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex &pool->lock/1 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &____s->seqcount irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 &pipe->mutex/1 &sem->wait_lock irq_context: 0 &pipe->mutex/1 &p->pi_lock irq_context: 0 &pipe->mutex/1 &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock 
rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM slock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM wlock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &list->lock#22 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM nr_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM rlock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_NETROM irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &base->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex remove_cache_srcu irq_context: 0 &net->xfrm.xfrm_cfg_mutex remove_cache_srcu quarantine_lock irq_context: 
softirq &x->lock irq_context: softirq (&x->rtimer) irq_context: softirq (&x->rtimer) &x->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#23 bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &base->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &____s->seqcount irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 
&sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &c->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &n->list_lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 
(wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 uts_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle 
&bgl->locks[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 lock prog_idr_lock batched_entropy_u8.lock irq_context: 0 lock prog_idr_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 sb_internal &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC 
&____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 sk_lock-AF_INET6 stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sk_lock-AF_INET6 &f->f_owner.lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &f->f_owner.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock 
slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &f->f_owner.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fastopen_seqlock.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &f->f_owner.lock rcu_read_lock rcu_read_lock &sighand->siglock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &f->f_owner.lock rcu_read_lock &sighand->siglock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#16 irq_context: 0 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 hugetlb_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rq->__lock irq_context: 0 
&mm->mmap_lock &sb->s_type->i_mutex_key#21 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &____s->seqcount irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &base->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &____s->seqcount irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] stock_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &mm->page_table_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &resv_map->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] hugetlb_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] hugetlb_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] ptlock_ptr(page) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 
(wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 lock#5 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 
&lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 smc_v4_hashinfo.lock irq_context: 0 sk_lock-AF_SMC irq_context: 0 sk_lock-AF_SMC slock-AF_SMC irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_SMC k-slock-AF_INET irq_context: 0 slock-AF_SMC irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key ptlock_ptr(page) irq_context: 0 &mm->mmap_lock hugetlb_lock irq_context: 0 &mm->mmap_lock &resv_map->lock irq_context: 0 &mm->mmap_lock &resv_map->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &resv_map->lock pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#13 mount_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#13 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#13 &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#13 &wb->list_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#13 &wb->list_lock &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 &lruvec->lru_lock irq_context: 0 &resv_map->lock irq_context: 0 &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &sem->wait_lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#5 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&smc->connect_work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC 
slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC smc_v4_hashinfo.lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC clock-AF_SMC irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 
&c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &n->list_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &n->list_lock &c->lock irq_context: 0 sb_writers#4 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 sb_writers#4 &ei->xattr_sem &rq->__lock irq_context: 0 sb_writers#4 &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 dup_mmap_sem &rq->__lock irq_context: 0 dup_mmap_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key ptlock_ptr(page) irq_context: 0 &mm->mmap_lock hugetlb_lock irq_context: 0 &u->iolock stock_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu pool_lock#2 irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock rlock-AF_PACKET irq_context: 0 &u->iolock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex 
&batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->bind_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bond->mode_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->pg_vec_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->pg_vec_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock_bh icmp_global.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh icmp_global.lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock 
rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock rcu_node_0 irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mii_work)->timer irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#2 irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mii_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->arp_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->alb_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->ad_work)->work) irq_context: 0 rtnl_mutex &x->wait#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &n->list_lock irq_context: 
0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback krc.lock irq_context: 0 &u->iolock &mm->mmap_lock &sem->wait_lock irq_context: softirq rcu_callback stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &bsd_socket_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &dentry->d_lock &lru->node[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RXRPC irq_context: 0 &sb->s_type->i_mutex_key#10 (wq_completion)krxrpcd irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->slave_arr_work)->work) irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 krc.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex rcu_node_0 irq_context: 0 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 krc.lock irq_context: 0 &u->iolock &mm->mmap_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &bond->mode_lock &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock (console_sem).lock irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner &port_lock_key 
irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock genl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &batadv_netdev_xmit_lock_key irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ul->lock#2 irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex (work_completion)(&(&slave->notify_work)->work) irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: softirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &x->wait#10 irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_RXRPC irq_context: 0 map_idr_lock &obj_hash[i].lock irq_context: 0 map_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) stock_lock irq_context: softirq rcu_callback rlock-AF_RXRPC irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#7 stock_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 
rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem lock#5 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &n->list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &____s->seqcount irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock &____s->seqcount irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key krc.lock irq_context: 0 &bat_priv->forw_bcast_list_lock irq_context: 0 &bat_priv->forw_bat_list_lock irq_context: 0 &bat_priv->gw.list_lock irq_context: 0 (work_completion)(&(&bat_priv->bat_v.ogm_wq)->work) irq_context: 0 &bat_priv->bat_v.ogm_buff_mutex irq_context: 0 &bat_priv->bat_v.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 &bat_priv->bat_v.ogm_buff_mutex pool_lock#2 irq_context: 0 &bat_priv->tvlv.container_list_lock irq_context: 0 &bat_priv->tvlv.handler_list_lock irq_context: 0 (work_completion)(&(&bat_priv->nc.work)->work) irq_context: 0 key#17 irq_context: 0 key#18 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 (work_completion)(&(&bat_priv->dat.work)->work) irq_context: 0 &hash->list_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pcpu_alloc_mutex rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &base->lock irq_context: 0 &u->iolock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &mm->mmap_lock sb_writers#4 &c->lock irq_context: 0 (work_completion)(&(&bat_priv->bla.work)->work) irq_context: 0 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 key#21 irq_context: 0 (work_completion)(&(&bat_priv->mcast.work)->work) irq_context: 0 (work_completion)(&(&bat_priv->tt.work)->work) irq_context: 0 key#16 irq_context: 0 key#20 irq_context: 0 &bat_priv->tt.req_list_lock irq_context: 0 &bat_priv->tt.changes_list_lock irq_context: 0 &bat_priv->tt.roam_list_lock irq_context: 0 (work_completion)(&(&bat_priv->orig_work)->work) irq_context: 0 key#19 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 rtnl_mutex slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 &u->peer_wait &p->pi_lock irq_context: 0 &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
&u->iolock rcu_read_lock rcu_read_lock &rq->__lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &base->lock &obj_hash[i].lock irq_context: 0 lock map_idr_lock &c->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &u->iolock &sem->wait_lock irq_context: 0 &u->iolock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle 
&ei->i_data_sem &lg->lg_mutex remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#14 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 mapping.invalidate_lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tfile->socket.wq.wait irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&strp->work) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &dd->lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &fsnotify_mark_srcu &____s->seqcount#2 irq_context: 0 &fsnotify_mark_srcu &____s->seqcount irq_context: 0 sb_writers#4 &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) quarantine_lock irq_context: 0 rtnl_mutex &tun->lock irq_context: 0 rtnl_mutex wlock-AF_UNSPEC irq_context: 0 rtnl_mutex elock-AF_UNSPEC irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_NETROM irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &ul->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex &ul->lock#2 &dir->lock#2 irq_context: 0 rtnl_mutex &ndev->lock &base->lock irq_context: 0 rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rnp->exp_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss 
remove_cache_srcu &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tun->lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &f->f_pos_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 misc_mtx &____s->seqcount#2 irq_context: 0 misc_mtx &n->list_lock irq_context: 0 misc_mtx &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem stock_lock irq_context: 0 pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex stock_lock irq_context: 0 misc_mtx remove_cache_srcu irq_context: 0 misc_mtx remove_cache_srcu quarantine_lock irq_context: 0 
misc_mtx remove_cache_srcu &c->lock irq_context: 0 misc_mtx remove_cache_srcu &n->list_lock irq_context: 0 misc_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 misc_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 misc_mtx remove_cache_srcu &rq->__lock irq_context: 0 misc_mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#13 irq_context: 0 &mm->mmap_lock &xa->xa_lock#13 pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &s->s_inode_list_lock irq_context: 0 &mm->mmap_lock batched_entropy_u32.lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#13 irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#13 pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 &rq->__lock irq_context: 0 &type->s_umount_key#22/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#22/1 stock_lock irq_context: 0 &type->s_umount_key#22/1 &n->list_lock irq_context: 0 &type->s_umount_key#22/1 &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#22/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &pcp->lock &zone->lock irq_context: 
0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 
pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &mq_lock irq_context: 0 (wq_completion)events free_ipc_work irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work mount_lock irq_context: 0 (wq_completion)events free_ipc_work mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 shrinker_rwsem irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rename_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &sb->s_type->i_lock_key#20 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &s->s_inode_list_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &xa->xa_lock#7 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 sb_lock irq_context: 0 (wq_completion)events free_ipc_work unnamed_dev_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work list_lrus_mutex irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#13 irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#13 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#13 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sb_lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock pool_lock#2 irq_context: 0 
(wq_completion)events free_ipc_work mnt_id_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work &ids->rwsem irq_context: 0 (wq_completion)events free_ipc_work (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work percpu_counters_lock irq_context: 0 (wq_completion)events free_ipc_work pcpu_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock irq_context: 0 (wq_completion)events free_ipc_work proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work stock_lock irq_context: 0 (wq_completion)netns irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->nsid_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &tn->node_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ebt_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netns_bpf_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->cells_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
(&net->cells_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem bit_wait_table + i irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)afs irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &net->cells_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_timer) irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &(&net->fs_lock)->lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->incoming_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &call->waitq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock &call->notify_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (rxrpc_call_limiter).lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->recvmsg_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (&call->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &list->lock#23 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)kafsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &local->services_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait irq_context: 0 &rxnet->local_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&local->client_conn_reap_timer) irq_context: 0 &rxnet->conn_lock irq_context: 0 &table->hash[i].lock irq_context: 0 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-clock-AF_INET6 irq_context: 0 &list->lock#24 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex (work_completion)(&data->gc_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &srv->idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#13 &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ptype_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 
&tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem loop_conns_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&rxnet->service_conn_reap_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) irq_context: 0 
(wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex bpf_devs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pid_caches_mutex slab_mutex &____s->seqcount#2 irq_context: 0 pid_caches_mutex slab_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem lweventlist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override 
rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &tn->node_list_lock irq_context: 0 pernet_ops_rwsem ebt_mutex irq_context: 0 pernet_ops_rwsem &xt[i].mutex irq_context: 0 pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem netns_bpf_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 
pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex class irq_context: 0 pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 pernet_ops_rwsem 
rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 pool_lock#2 irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_base_lock irq_context: 0 pernet_ops_rwsem lweventlist_lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem &ht->mutex irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &ht->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_node_0 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem napi_hash_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER 
&obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &info->lock irq_context: 0 pernet_ops_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fn->fou_lock irq_context: 0 key#23 irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_radio_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem devices_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nlk->wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wlock-AF_NETLINK 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: 0 &mm->mmap_lock sb_pagefaults batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock sb_pagefaults kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &meta->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &fn->fou_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hn->hn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)inet_frag_wq irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) 
rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#20 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->sync_mutex irq_context: 0 pernet_ops_rwsem hwsim_radio_lock irq_context: 0 pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 
pernet_ops_rwsem rdma_nets_rwsem irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 pernet_ops_rwsem &nlk->wait irq_context: 0 pernet_ops_rwsem wlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem &xa->xa_lock#7 irq_context: 0 pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 pool_lock#2 irq_context: 0 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &hn->hn_lock irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock 
rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 pernet_ops_rwsem &pnettable->lock irq_context: 0 pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#20 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rds_sock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 rtnl_mutex 
&idev->mc_lock _xmit_IPGRE pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RDS irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_recv_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_monitor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_rdma_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &q->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rds_sock_lock irq_context: 0 &dir->lock &obj_hash[i].lock irq_context: 0 &dir->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &dir->lock &obj_hash[i].lock irq_context: softirq rcu_callback &dir->lock &____s->seqcount irq_context: softirq rcu_callback &dir->lock &pcp->lock &zone->lock irq_context: softirq rcu_callback &dir->lock pool_lock#2 irq_context: softirq rcu_callback &dir->lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 key#23 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock kfence_freelist_lock irq_context: softirq (&peer->timer_send_keepalive) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (debug_obj_work).work quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 &disk->open_mutex &lock->wait_lock irq_context: 0 &sighand->siglock batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_lock_key#27 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#14 mount_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#14 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#14 &sb->s_type->i_lock_key#27 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#14 &wb->list_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#14 &wb->list_lock &sb->s_type->i_lock_key#27 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &mapping->private_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &pl->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &pl->lock key#11 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: 0 &xt[i].mutex &pcp->lock &zone->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 pernet_ops_rwsem k-slock-AF_INET irq_context: 0 pernet_ops_rwsem 
k-slock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: 0 pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock kfence_freelist_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &meta->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->gssp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem percpu_counters_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem recent_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hashlimit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem tcp_metrics_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u8.lock irq_context: 0 
(wq_completion)events (work_completion)(&ht->run_work) &ht->mutex kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 
0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait#10 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: softirq (&peer->timer_persistent_keepalive) batched_entropy_u8.lock irq_context: softirq (&peer->timer_persistent_keepalive) kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &sn->gssp_lock 
irq_context: 0 pernet_ops_rwsem &cd->hash_lock irq_context: 0 pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 pernet_ops_rwsem ip6_fl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_mutex irq_context: 0 pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 pernet_ops_rwsem recent_lock irq_context: 0 pernet_ops_rwsem hashlimit_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem tcp_metrics_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &list->lock#2 irq_context: 0 pernet_ops_rwsem &xa->xa_lock#3 irq_context: 0 pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_node_0 irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex pool_lock#2 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem sysctl_lock 
krc.lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock hwsim_radio_lock init_task.mems_allowed_seq.seqcount irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sem->wait_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#3 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: hardirq rcu_state.barrier_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: hardirq rcu_state.barrier_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 &zone->lock irq_context: 0 sk_lock-AF_INET6 &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock (work_completion)(flush) irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &x->wait#10 irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_highpri (work_completion)(flush) irq_context: 0 (wq_completion)events_highpri (work_completion)(flush) &list->lock#5 irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem 
rtnl_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex netpoll_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pn->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_IPGRE#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sch->q.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex __ip_vs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &im->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex flowtable_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_IPGRE &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_IPGRE pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_IPGRE krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &child->perf_event_mutex &rq->__lock irq_context: 0 &child->perf_event_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &base->lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->routes.lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->routes.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &base->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sem->wait_lock 
irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &sem->wait_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 purge_vmap_area_lock quarantine_lock irq_context: 0 &iint->mutex &p->alloc_lock irq_context: 0 &iint->mutex &list->lock irq_context: 0 &iint->mutex kauditd_wait.lock irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu 
rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&ndev->rs_timer) batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) kfence_freelist_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#3 pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work &base->lock irq_context: 0 (wq_completion)events fqdir_free_work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem key irq_context: 0 pernet_ops_rwsem pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &pipe->rd_wait 
&p->pi_lock &rq->__lock &base->lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_node_0 irq_context: 0 namespace_sem stock_lock irq_context: 0 namespace_sem &____s->seqcount#2 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &pl->lock key#11 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 key#11 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &xa->xa_lock#13 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#6 stock_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &xa->xa_lock#13 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &xa->xa_lock#13 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem stock_lock irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rename_lock.seqcount irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#10 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &dentry->d_lock irq_context: 0 sb_writers#10 tomoyo_ss irq_context: 0 sb_writers#10 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#10 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#10 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#10 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#10 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 kn->active#58 fs_reclaim irq_context: 0 sb_writers#10 kn->active#58 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 kn->active#58 remove_cache_srcu irq_context: 0 sb_writers#10 kn->active#58 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#10 kn->active#58 remove_cache_srcu &c->lock irq_context: 0 sb_writers#10 kn->active#58 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#10 kn->active#58 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 kn->active#58 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#10 kn->active#58 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#10 kn->active#58 stock_lock irq_context: 0 sb_writers#10 kn->active#58 &rq->__lock irq_context: 0 sb_writers#10 kn->active#58 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 sb_writers#10 
kn->active#58 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#10 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#10 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &sb->s_type->i_mutex_key#15 irq_context: 0 sb_writers#10 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#10 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 iattr_mutex irq_context: 0 sb_writers#10 &sb->s_type->i_mutex_key#15 tk_core.seq.seqcount irq_context: 0 sb_writers#10 &sb->s_type->i_mutex_key#15 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &sb->s_type->i_mutex_key#15 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#10 &sb->s_type->i_mutex_key#15 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#10 &sb->s_type->i_mutex_key#15 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &sb->s_type->i_mutex_key#15 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 bit_wait_table + i irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock cgroup_file_kn_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock cgroup_file_kn_lock kernfs_notify_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override 
&n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex pool_lock#2 irq_context: 0 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[3] irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock 
ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 kn->active#58 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#10 kn->active#58 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#10 kn->active#58 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 
rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#10 &xattrs->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xa->xa_lock#7 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &xa->xa_lock#7 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &fsnotify_mark_srcu irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 pernet_ops_rwsem &wq->mutex irq_context: 0 pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 pernet_ops_rwsem &wq->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 pernet_ops_rwsem &net->cells_lock irq_context: 0 pernet_ops_rwsem (&net->cells_timer) irq_context: 0 pernet_ops_rwsem bit_wait_table + i irq_context: 0 pernet_ops_rwsem (&net->fs_timer) irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 
&xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex (work_completion)(&data->gc_work) irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex 
pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 pernet_ops_rwsem &srv->idr_lock irq_context: 0 pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem (&rxnet->service_conn_reap_timer) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 pernet_ops_rwsem &rnp->exp_lock irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[3] irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 pernet_ops_rwsem (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 pernet_ops_rwsem loop_conns_lock irq_context: 0 pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#6 irq_context: 0 cb_lock &devlink->lock_key#6 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#3 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 pool_lock#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 krc.lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/2 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/2 krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/2 krc.lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/2 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (work_completion)(&port->bc_work) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex.wait_lock irq_context: 0 cb_lock quarantine_lock irq_context: 0 cb_lock remove_cache_srcu irq_context: 0 cb_lock remove_cache_srcu quarantine_lock irq_context: 0 cb_lock remove_cache_srcu &c->lock irq_context: 0 cb_lock remove_cache_srcu &n->list_lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock remove_cache_srcu pool_lock#2 
irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[3] irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &base->lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 pernet_ops_rwsem &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock 
&type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock 
&sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle 
&ei->i_data_sem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#4 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET slock-AF_PACKET &sk->sk_lock.wq irq_context: 0 sk_lock-AF_PACKET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &group->mark_mutex rcu_read_lock &rq->__lock irq_context: 0 &group->mark_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &rq->__lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock 
rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &base->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &c->lock irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PHONET irq_context: 0 sk_lock-AF_PHONET &rq->__lock irq_context: 0 sk_lock-AF_PHONET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PHONET slock-AF_PHONET irq_context: 0 sk_lock-AF_PHONET port_mutex#2 
irq_context: 0 sk_lock-AF_PHONET port_mutex#2 local_port_range_lock.seqcount irq_context: 0 sk_lock-AF_PHONET port_mutex#2 &pnsocks.lock irq_context: 0 slock-AF_PHONET irq_context: 0 rlock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET slock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &pnsocks.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET resource_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET resource_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET resource_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#25 irq_context: 0 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET6 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 
sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &c->lock irq_context: 0 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &token_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock 
irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &msk->pm.lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET6 irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock lock#4 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events free_ipc_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 tomoyo_ss mount_lock irq_context: 0 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 
jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 fs_reclaim irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 mount_lock irq_context: 0 sb_writers#4 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 namespace_sem remove_cache_srcu irq_context: 0 namespace_sem remove_cache_srcu quarantine_lock irq_context: 0 namespace_sem remove_cache_srcu &c->lock irq_context: 0 namespace_sem remove_cache_srcu &n->list_lock irq_context: 0 namespace_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 namespace_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 namespace_sem remove_cache_srcu &rq->__lock irq_context: 0 namespace_sem rcu_read_lock rcu_node_0 irq_context: 0 namespace_sem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 
rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)events &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sem->wait_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex &lock->wait_lock irq_context: 0 sb_writers#4 &iint->mutex &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock key#8 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &ret->b_state_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[2] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &f->f_pos_lock &lock->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &ret->b_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 sb_writers#4 
jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle bit_wait_table + i irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &n->list_lock irq_context: 0 sb_writers#4 &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: softirq _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &bgl->locks[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp 
(work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &br->lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_node_0 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &base->lock irq_context: 0 sb_writers#4 &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex quarantine_lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 ip6_sk_fl_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &p->lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 
&base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &____s->seqcount irq_context: 0 kn->active#5 &cfs_rq->removed.lock irq_context: 0 kn->active#5 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &ul->lock irq_context: 0 l2tp_ip6_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 cb_lock &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#4 (console_sem).lock irq_context: 0 sb_writers#4 console_owner_lock irq_context: 0 sb_writers#4 console_owner irq_context: 0 sb_writers#4 console_lock console_srcu console_owner_lock irq_context: 0 sb_writers#4 console_lock console_srcu console_owner irq_context: 0 sb_writers#4 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 console_lock console_srcu console_owner console_owner_lock irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex irq_context: 0 &nft_net->commit_mutex irq_context: 0 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_updates irq_context: 0 sb_writers#4 &journal->j_barrier irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &journal->j_barrier &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock 
irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &dd->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex 
rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &c->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex kfence_freelist_lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip6_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &____s->seqcount irq_context: softirq (&timer) rcu_read_lock &____s->seqcount#2 irq_context: softirq (&timer) rcu_read_lock &n->list_lock irq_context: softirq (&timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key nr_node_list_lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock 
rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 
irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) once_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) once_lock crngs.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &____s->seqcount irq_context: 0 (wq_completion)krdsd 
(work_completion)(&rtn->rds_tcp_accept_w) rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem crngs.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex &id_priv->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem id_table_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &x->wait#27 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_tcp_conn_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 rds_tcp_tc_list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd 
(work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &xa->xa_lock#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &fsnotify_mark_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd 
(work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 list_mutex irq_context: 0 list_mutex fs_reclaim irq_context: 0 list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd 
(work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 list_mutex &c->lock irq_context: 0 list_mutex pool_lock#2 irq_context: 0 list_mutex lock irq_context: 0 list_mutex lock kernfs_idr_lock irq_context: 0 list_mutex &root->kernfs_rwsem irq_context: 0 list_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 list_mutex &obj_hash[i].lock irq_context: 0 list_mutex &base->lock irq_context: 0 list_mutex &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &rm->m_rs_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-slock-AF_INET6 irq_context: 0 (wq_completion)rcu_gp &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock 
rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &list->lock#26 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &cp->cp_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &meta->lock irq_context: 0 nfnl_subsys_ctnetlink_exp (console_sem).lock irq_context: 0 nfnl_subsys_ctnetlink_exp console_lock console_srcu console_owner_lock irq_context: 0 nfnl_subsys_ctnetlink_exp console_lock console_srcu console_owner irq_context: 0 nfnl_subsys_ctnetlink_exp console_lock console_srcu console_owner &port_lock_key irq_context: 0 nfnl_subsys_ctnetlink_exp console_lock console_srcu console_owner console_owner_lock irq_context: 0 nfnl_subsys_ctnetlink_exp &rq->__lock irq_context: 0 nfnl_subsys_ctnetlink_exp &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#9 
irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 list_mutex (&info->timer->timer) irq_context: 0 list_mutex (work_completion)(&info->timer->work) irq_context: 0 list_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 list_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 list_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &c->lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &x->wait#10 irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &sd->defer_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer)#2 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 list_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &c->lock irq_context: 0 list_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 list_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 list_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 list_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink_exp rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock 
&obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 rds_tcp_tc_list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &xa->xa_lock#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &fsnotify_mark_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) (work_completion)(&(&cp->cp_conn_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) (work_completion)(&(&cp->cp_conn_w)->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[0] irq_context: 0 list_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 
sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 list_mutex list_mutex.wait_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 list_mutex.wait_lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &rm->m_rs_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key#24 irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex 
&root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &p->alloc_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 key#24 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &list->lock#26 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &sd->defer_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq slock-AF_INET6 &obj_hash[i].lock irq_context: softirq slock-AF_INET6 &base->lock irq_context: softirq slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq slock-AF_INET6 pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &xt[i].mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &xt[i].mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount 
irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rcu_state.expedited_wq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 batched_entropy_u8.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 kfence_freelist_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock &c->lock irq_context: softirq slock-AF_INET6 &c->lock irq_context: softirq slock-AF_INET6 &____s->seqcount#2 irq_context: softirq slock-AF_INET6 &____s->seqcount irq_context: softirq slock-AF_INET6 &n->list_lock irq_context: softirq slock-AF_INET6 &n->list_lock &c->lock irq_context: 0 tasklist_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &pcp->lock &zone->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 quarantine_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &list->lock#26 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#13 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 (wq_completion)events_unbound connector_reaper_work &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex smcd_dev_list.mutex irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clock-AF_LLC irq_context: 0 sk_lock-AF_LLC irq_context: 0 sk_lock-AF_LLC slock-AF_LLC irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &c->lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock pool_lock#2 irq_context: 0 sk_lock-AF_LLC fs_reclaim irq_context: 0 sk_lock-AF_LLC fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_LLC &c->lock irq_context: 0 sk_lock-AF_LLC &n->list_lock irq_context: 0 sk_lock-AF_LLC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_LLC pool_lock#2 irq_context: 0 sk_lock-AF_LLC &dir->lock#2 irq_context: 0 sk_lock-AF_LLC &sap->sk_lock irq_context: 0 sk_lock-AF_LLC wlock-AF_LLC irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_LLC &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC &base->lock irq_context: 0 sk_lock-AF_LLC &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC &ei->socket.wq.wait irq_context: 0 slock-AF_LLC irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC slock-AF_LLC &sk->sk_lock.wq irq_context: 0 sk_lock-AF_LLC &rq->__lock irq_context: 0 sk_lock-AF_LLC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_LLC &sk->sk_lock.wq irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem &ht->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[2] irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_node_0 irq_context: 0 sk_lock-AF_LLC quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: softirq (&n->timer) rcu_read_lock lock#8 irq_context: softirq (&n->timer) rcu_read_lock id_table_lock irq_context: softirq (&n->timer) pool_lock#2 irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) &n->lock &____s->seqcount#9 irq_context: softirq (&n->timer) nl_table_lock irq_context: softirq (&n->timer) &obj_hash[i].lock irq_context: softirq (&n->timer) nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 
&mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC slock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC &sap->sk_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->pf_cycle_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->ack_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->rej_sent_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->busy_state_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#27 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 
tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &n->list_lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pgd_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex key irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex percpu_counters_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pfkey_mutex irq_context: 0 pfkey_mutex &rq->__lock irq_context: 0 pfkey_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (kmod_concurrent_max).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &x->wait#17 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 &sch->q.lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_base_lock &xa->xa_lock#3 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &bond->ipsec_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex running_helpers_waitq.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock pool_lock#2 
irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex gdp_mutex &c->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 (console_sem).lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 nl_table_lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 nl_table_wait.lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 &macsec_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex &rnp->exp_wq[2] irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex batched_entropy_u8.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex kfence_freelist_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu quarantine_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem kthread_create_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/2 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex 
&idev->mc_lock &macvlan_netdev_addr_lock_key/2 pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &x->wait irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &x->wait#21 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (&timer.timer) irq_context: 0 rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 &type->i_mutex_dir_key/1 irq_context: 0 &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &c->lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &fsnotify_mark_srcu irq_context: 0 &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 &type->i_mutex_dir_key/1 &xa->xa_lock#7 irq_context: 0 &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 rtnl_mutex subsys mutex#82 &k->k_lock klist_remove_lock irq_context: 0 rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex krc.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_NETLINK slock-AF_NETLINK &sk->sk_lock.wq irq_context: 0 slock-AF_NETLINK &sk->sk_lock.wq 
irq_context: 0 slock-AF_NETLINK &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_NETLINK &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_NETLINK &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem quarantine_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &obj_hash[i].lock pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rlock-AF_KEY irq_context: 0 &sb->s_type->i_mutex_key#10 pfkey_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_KEY irq_context: 0 &mm->mmap_lock sb_writers#4 &n->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_KEY irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_KEY irq_context: 0 (wq_completion)bond4 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex vlan_ioctl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &mm->mmap_lock pcpu_lock stock_lock irq_context: 0 pfkey_mutex pfkey_mutex.wait_lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu quarantine_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 &child->perf_event_mutex &cfs_rq->removed.lock irq_context: 0 &child->perf_event_mutex 
&obj_hash[i].lock irq_context: 0 &child->perf_event_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex.wait_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 vlan_ioctl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tn->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex netpoll_srcu irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pn->hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sch->q.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex quarantine_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &im->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex class irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock 
irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ifa->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock 
&base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock 
nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 &x->wait#8 &p->pi_lock irq_context: 0 &x->wait#8 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &xa->xa_lock &c->lock irq_context: 0 &xa->xa_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex bpf_devs_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
(inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->xdp.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mirred_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &nft_net->commit_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ent->pde_unload_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_report_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->pndevs.lock 
irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->routes.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnettable->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex target_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &k->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &x->wait#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex deferred_probe_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex device_links_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 vlan_ioctl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex lweventlist_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 vlan_ioctl_mutex netdev_unregistering_wq.lock irq_context: 0 vlan_ioctl_mutex &____s->seqcount irq_context: 0 vlan_ioctl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pgd_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem key irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem percpu_counters_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work 
&cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) pool_lock#2 irq_context: 0 file_rwsem &rq->__lock irq_context: 0 file_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 pfkey_mutex pfkey_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 pfkey_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 pfkey_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex pgd_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex key irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex pcpu_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex percpu_counters_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex nf_conntrack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &cfs_rq->removed.lock irq_context: 0 rcu_read_lock stock_lock irq_context: 0 rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock pgd_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock key irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock percpu_counters_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle key#4 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_grp_active_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 &ei->i_data_sem irq_context: 0 &ei->i_data_sem &sem->wait_lock irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pgd_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock stock_lock irq_context: 0 &pipe->mutex/1 rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 
rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock once_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock once_lock crngs.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock nl_table_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock nl_table_wait.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rlock-AF_NETLINK irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock nl_table_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock 
rcu_read_lock rlock-AF_NETLINK irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock nl_table_wait.lock &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &cnet->ecache.dying_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &base->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#28 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rose_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &list->lock#28 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rlock-AF_ROSE irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex deferred_probe_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock quarantine_lock irq_context: 0 &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 nfnl_grp_active_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &bgl->locks[i].lock irq_context: softirq rcu_callback prog_idr_lock irq_context: softirq rcu_callback prog_idr_lock &obj_hash[i].lock irq_context: softirq rcu_callback prog_idr_lock pool_lock#2 irq_context: softirq rcu_callback bpf_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &root->kernfs_rwsem &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 &root->kernfs_iattr_rwsem rcu_node_0 irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq &(&cnet->ecache.dwork)->timer irq_context: softirq &(&cnet->ecache.dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&cnet->ecache.dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&cnet->ecache.dwork)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&cnet->ecache.dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&cnet->ecache.dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) rcu_read_lock &pool->lock irq_context: 0 
(wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock &c->lock irq_context: 0 rtnl_mutex &tbl->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &tbl->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink 
rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex _xmit_NETROM#2 irq_context: 0 rtnl_mutex &this->info_list_lock irq_context: 0 &pipe->mutex/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &c->lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &group->mark_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC slock-AF_UNSPEC irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC free_vmap_area_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC vmap_area_lock irq_context: 0 
rtnl_mutex sk_lock-AF_UNSPEC &____s->seqcount irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pack_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC batched_entropy_u32.lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &fp->aux->used_maps_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &fp->aux->used_maps_mutex &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &fp->aux->used_maps_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex slock-AF_UNSPEC irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_commit irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_done_commit irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock 
&journal->j_list_lock &obj_hash[i].lock irq_context: 0 &journal->j_state_lock &journal->j_list_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &cnet->ecache.dying_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &meta->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) 
&hard_iface->bat_iv.ogm_buff_mutex &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &____s->seqcount irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &app->lock#2 irq_context: 0 rtnl_mutex &app->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex &app->lock irq_context: 0 rtnl_mutex &app->lock pool_lock#2 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/2 &dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/2 &dev_addr_list_lock_key#2/1 
rcu_read_lock _xmit_ETHER irq_context: softirq (&app->join_timer)#2 &app->lock#2 pool_lock#2 irq_context: softirq (&app->join_timer)#2 &app->lock#2 &list->lock#12 irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rcu_read_lock nl_table_lock irq_context: 0 rtnl_mutex rcu_read_lock nl_table_wait.lock irq_context: softirq (&app->join_timer) &app->lock pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock &list->lock#11 irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&app->join_timer) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex pack_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex fs_reclaim irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex pool_lock#2 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex free_vmap_area_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex vmap_area_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &____s->seqcount irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex init_mm.page_table_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex bpf_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex text_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &p->pi_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &r->producer_lock#3 irq_context: softirq &r->producer_lock#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &r->producer_lock#3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sem->wait_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 file_rwsem rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &r->producer_lock#3 irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 &iint->mutex mapping.invalidate_lock stock_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_transaction_locked irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &c->lock irq_context: 0 &iint->mutex rcu_node_0 irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: softirq (&app->join_timer) &app->lock &c->lock irq_context: softirq (&app->join_timer) &app->lock &n->list_lock irq_context: softirq (&app->join_timer) &app->lock &n->list_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &c->lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rhashtable_bucket irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock rlock-AF_KEY irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#13 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &policy->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &list->lock#29 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#13 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock krc.lock irq_context: 0 rtnl_mutex net_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &rnp->exp_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#8 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#8 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#8 tomoyo_ss &meta->lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &mapping->private_lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&cnet->ecache.dwork)->work) &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex net_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 
(wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 kfence_freelist_lock irq_context: 0 hashlimit_mutex irq_context: 0 hashlimit_mutex fs_reclaim irq_context: 0 hashlimit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 hashlimit_mutex pool_lock#2 irq_context: 0 hashlimit_mutex &c->lock irq_context: 0 hashlimit_mutex free_vmap_area_lock irq_context: 0 hashlimit_mutex vmap_area_lock irq_context: 0 hashlimit_mutex &____s->seqcount irq_context: 0 hashlimit_mutex init_mm.page_table_lock irq_context: 0 hashlimit_mutex &rq->__lock irq_context: 0 hashlimit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 hashlimit_mutex proc_subdir_lock irq_context: 0 hashlimit_mutex proc_inum_ida.xa_lock irq_context: 0 hashlimit_mutex proc_subdir_lock irq_context: 0 hashlimit_mutex &obj_hash[i].lock irq_context: 0 hashlimit_mutex &base->lock irq_context: 0 hashlimit_mutex &base->lock &obj_hash[i].lock irq_context: 0 hashlimit_mutex &ent->pde_unload_lock irq_context: 0 (work_completion)(&(&hinfo->gc_work)->work) irq_context: 0 &hinfo->lock irq_context: 0 &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 
&mm->context.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ul->lock#2 &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle 
batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle kfence_freelist_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC clock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_TIPC irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 
&iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 
&mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &lock->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock irq_context: 0 &hdev->lock &conn->chan_lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &rq->__lock irq_context: 0 &hdev->lock &obj_hash[i].lock irq_context: 0 &hdev->lock &base->lock irq_context: 0 &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 &hdev->lock &rq->__lock irq_context: 0 &hdev->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &base->lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 l2cap_sk_list.lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 chan_list_lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 rcu_node_0 irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &ei->socket.wq.wait irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 &ei->socket.wq.wait irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &mm->mmap_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 
slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 wlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 chan_list_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_node_0 irq_context: 0 bt_proto_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC free_vmap_area_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC free_vmap_area_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &sem->wait_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &iint->mutex remove_cache_srcu irq_context: 0 &iint->mutex remove_cache_srcu quarantine_lock irq_context: 0 &iint->mutex remove_cache_srcu &c->lock irq_context: 0 &iint->mutex remove_cache_srcu &n->list_lock irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 kfence_freelist_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &meta->lock irq_context: 0 bt_proto_lock &n->list_lock irq_context: 0 bt_proto_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx &rq->__lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 
sk_lock-AF_INET6 krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &____s->seqcount irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &meta->lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 bpf_module_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock crngs.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &n->list_lock 
irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &base->lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &list->lock#16 irq_context: softirq rcu_read_lock lock#6 &kcov->lock irq_context: softirq rcu_read_lock &local->ack_status_lock irq_context: softirq rcu_read_lock &local->ack_status_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->ack_status_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 &kcov->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &c->lock irq_context: 0 tracepoints_mutex tasklist_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &n->list_lock irq_context: 0 cb_lock 
&rdev->wiphy.mtx &local->mtx &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_send_keepalive) &n->list_lock irq_context: softirq (&peer->timer_send_keepalive) &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock pgd_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock key irq_context: 0 sk_lock-AF_INET6 rcu_read_lock pcpu_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock percpu_counters_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex (&timer.timer) irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock key#15 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 quarantine_lock irq_context: 0 &xt[i].mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier jbd2_handle irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock 
&____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override 
pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 &n->list_lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 &n->list_lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &folio_wait_table[i] irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_wait_done_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &sighand->siglock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex &ei->xattr_sem &rq->__lock irq_context: 0 &iint->mutex &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle bit_wait_table + i irq_context: 0 &pipe->mutex/1 rcu_read_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex kfence_freelist_lock irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 sb_internal jbd2_handle &cfs_rq->removed.lock irq_context: 0 &hdev->lock fs_reclaim irq_context: 0 &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &hdev->lock pool_lock#2 irq_context: 0 &hdev->lock &x->wait#9 irq_context: 0 &hdev->lock &c->lock irq_context: 0 &hdev->lock rcu_read_lock rcu_node_0 irq_context: 0 &hdev->lock rcu_read_lock 
&rq->__lock irq_context: 0 &hdev->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock &list->lock#6 irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &n->list_lock irq_context: 0 &pipe->mutex/1 &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu irq_context: 0 &pipe->mutex/1 remove_cache_srcu quarantine_lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &c->lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &n->list_lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &rq->__lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &pipe->mutex/1 fs_reclaim &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 kfence_freelist_lock irq_context: 0 &pipe->mutex/1 &meta->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_node_0 irq_context: 0 vsock_table_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock stock_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK clock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rlock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_VSOCK irq_context: 0 &f->f_pos_lock &p->lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 elock-AF_INET6 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex key#16 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &dir->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 pool_lock#2 irq_context: 0 alg_types_sem irq_context: 0 sk_lock-AF_ALG irq_context: 0 sk_lock-AF_ALG slock-AF_ALG irq_context: 0 sk_lock-AF_ALG &rq->__lock irq_context: 0 slock-AF_ALG irq_context: 0 sk_lock-AF_ALG fs_reclaim irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock irq_context: 0 sk_lock-AF_ALG &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG &dir->lock irq_context: 0 sk_lock-AF_ALG &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &ei->socket.wq.wait irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_ALG &c->lock irq_context: 0 sk_lock-AF_ALG &n->list_lock irq_context: 0 sk_lock-AF_ALG &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG slock-AF_ALG &sk->sk_lock.wq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 sk_lock-AF_ALG 
&mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &folio_wait_table[i] irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)rcu_gp &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock 
remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu irq_context: 0 sk_lock-AF_ALG remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sk_lock-AF_ALG &____s->seqcount#2 irq_context: 0 sk_lock-AF_ALG &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &c->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 key#13 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock stock_lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_node_0 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 kfence_freelist_lock irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP (console_sem).lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &meta->lock irq_context: 0 remove_cache_srcu kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#5 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock nl_table_wait.lock &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 sb_writers#4 &iint->mutex &ei->xattr_sem irq_context: 0 sb_writers#4 &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex &c->lock irq_context: 0 &iint->mutex &lock->wait_lock irq_context: 0 sb_writers#4 &iint->mutex fs_reclaim irq_context: 0 sb_writers#4 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &n->list_lock irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &lock->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 &f->f_pos_lock (console_sem).lock irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner_lock irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock crngs.lock base_crng.lock irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (&brmctx->ip4_mc_router_timer) irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET free_vmap_area_lock irq_context: 0 sk_lock-AF_INET vmap_area_lock irq_context: 0 sk_lock-AF_INET stock_lock irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_INET pack_mutex irq_context: 0 sk_lock-AF_INET text_mutex irq_context: 0 sk_lock-AF_INET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &fp->aux->used_maps_mutex irq_context: 0 rtnl_mutex (&brmctx->ip4_other_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip4_own_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_mc_router_timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_other_query.timer) irq_context: 0 rtnl_mutex 
(&brmctx->ip6_own_query.timer) irq_context: 0 rtnl_mutex (work_completion)(&br->mcast_gc_work) irq_context: 0 rtnl_mutex rcu_state.barrier_mutex irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &f->f_owner.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock fastopen_seqlock.seqcount irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET (&tw->tw_timer) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 slock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 wlock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &list->lock#30 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 x25_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 rlock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_X25 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex rcu_read_lock mfc_unres_lock irq_context: 0 rtnl_mutex rcu_read_lock mfc_unres_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &meta->lock irq_context: 0 sb_writers#4 
sb_internal jbd2_handle kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &ep->mtx &ep->lock &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ep->lock &ws->lock &obj_hash[i].lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex 
fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 &group->mark_mutex &lock->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#22/1 shrinker_rwsem &rq->__lock irq_context: 0 &type->s_umount_key#22/1 shrinker_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#22/1 shrinker_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &ep->mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 uevent_sock_mutex &c->lock irq_context: 0 uevent_sock_mutex &n->list_lock irq_context: 0 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 uevent_sock_mutex &rq->__lock irq_context: 0 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dentry->d_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &lru->node[i].lock irq_context: 0 uevent_sock_mutex.wait_lock irq_context: 0 &u->iolock &cfs_rq->removed.lock irq_context: 0 &u->iolock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock 
rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN clock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAN irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rlock-AF_CAN irq_context: softirq rcu_callback 
elock-AF_CAN irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 prog_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle bit_wait_table + i irq_context: 0 namespace_sem &n->list_lock irq_context: 0 namespace_sem &n->list_lock &c->lock irq_context: 0 namespace_sem namespace_sem.wait_lock irq_context: 0 namespace_sem.wait_lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock stock_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 namespace_sem fs_reclaim &rq->__lock irq_context: 0 namespace_sem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 &type->s_umount_key#22/1 &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_PACKET &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock rcu_read_lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock kfence_freelist_lock irq_context: 0 namespace_sem rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 fs_reclaim irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &____s->seqcount irq_context: 0 &pipe->mutex/1 
sk_lock-AF_INET6 &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &meta->lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC kfence_freelist_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount#2 irq_context: 0 bt_proto_lock &____s->seqcount#2 irq_context: 0 bt_proto_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 file_rwsem &rcu_state.expedited_wq irq_context: softirq rcu_read_lock hwsim_radio_lock &zone->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &zone->lock irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u8.lock irq_context: softirq (&app->join_timer) &app->lock kfence_freelist_lock irq_context: softirq (&app->join_timer) &app->lock &____s->seqcount#2 irq_context: softirq (&app->join_timer) &app->lock &____s->seqcount irq_context: softirq (&app->join_timer) rcu_read_lock_bh &meta->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh kfence_freelist_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &meta->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) kfence_freelist_lock irq_context: 0 pernet_ops_rwsem __ip_vs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 lock#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex quarantine_lock irq_context: 0 bpf_stats_enabled_mutex irq_context: 0 bpf_stats_enabled_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex &newf->file_lock irq_context: 0 bpf_stats_enabled_mutex fs_reclaim irq_context: 0 bpf_stats_enabled_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_stats_enabled_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 
bpf_stats_enabled_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex stock_lock irq_context: 0 bpf_stats_enabled_mutex pool_lock#2 irq_context: 0 bpf_stats_enabled_mutex &sb->s_type->i_lock_key#15 irq_context: 0 bpf_stats_enabled_mutex &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 bpf_stats_enabled_mutex &c->lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex &n->list_lock irq_context: 0 bpf_stats_enabled_mutex &n->list_lock &c->lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex rcu_node_0 irq_context: 0 lock prog_idr_lock &____s->seqcount#2 irq_context: 0 lock prog_idr_lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC fs_reclaim irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock irq_context: 0 sk_lock-AF_TIPC &list->lock#31 irq_context: 0 sk_lock-AF_TIPC &rq->__lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &list->lock#31 irq_context: 0 slock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_TIPC pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &list->lock#31 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC &list->lock#31 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 raw_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 raw_lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_IEEE802154 irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_IEEE802154 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC &c->lock irq_context: 0 sk_lock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX slock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX &pn->hash_lock irq_context: 0 sk_lock-AF_PPPOX clock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX rlock-AF_PPPOX irq_context: 0 slock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 lock#4 &obj_hash[i].lock irq_context: 0 &list->lock#32 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX slock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->hash_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX clock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rlock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PPPOX irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim 
mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_node_0 irq_context: 0 rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 
0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 lock prog_idr_lock &n->list_lock irq_context: 0 lock prog_idr_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dev_map_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) dev_map_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->lock &f->f_owner.lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_node_0 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh 
&r->producer_lock#5 irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#48 stock_lock irq_context: 0 &type->i_mutex_dir_key#6 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &c->lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 quarantine_lock irq_context: 0 bt_proto_lock batched_entropy_u8.lock irq_context: 0 bt_proto_lock kfence_freelist_lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 kn->active#48 &rq->__lock irq_context: 0 kn->active#48 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 
&root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 cgroup_threadgroup_rwsem &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_node_0 irq_context: 0 kn->active#48 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_lock fasync_lock irq_context: 0 sb_writers#8 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &obj_hash[i].lock irq_context: 0 tasklist_lock &base->lock irq_context: 0 tasklist_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_long 
(work_completion)(&br->mcast_gc_work) &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock krc.lock irq_context: 0 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_owner.lock irq_context: 0 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 &f->f_lock fasync_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 misc_mtx misc_mtx.wait_lock irq_context: 0 misc_mtx.wait_lock irq_context: 0 cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 &group->mark_mutex rcu_read_lock rcu_node_0 irq_context: softirq (&tw->tw_timer) irq_context: softirq 
(&tw->tw_timer) &hashinfo->ehash_locks[i] irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq (&tw->tw_timer) stock_lock irq_context: softirq (&tw->tw_timer) &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &base->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex console_owner_lock irq_context: 0 cb_lock genl_mutex console_owner irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &c->lock batched_entropy_u8.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &c->lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem batched_entropy_u8.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF &rq->__lock irq_context: 0 sk_lock-AF_CAIF &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 
rlock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &this->info_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF clock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF elock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAIF irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &xt[i].mutex purge_vmap_area_lock &meta->lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock kfence_freelist_lock irq_context: 0 lock btf_idr_lock irq_context: 0 lock btf_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 fill_pool_map-wait-type-override pgd_lock irq_context: 0 fill_pool_map-wait-type-override stock_lock irq_context: 0 fill_pool_map-wait-type-override key irq_context: 0 fill_pool_map-wait-type-override pcpu_lock irq_context: 0 fill_pool_map-wait-type-override percpu_counters_lock irq_context: 0 fill_pool_map-wait-type-override pcpu_lock stock_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sem->wait_lock irq_context: 0 
sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &ei->socket.wq.wait irq_context: 0 sb_writers &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock hci_dev_list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock (console_sem).lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock (console_sem).lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &rq->__lock 
irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rlock-AF_BLUETOOTH irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ipvs->sync_mutex irq_context: 0 ipvs->sync_mutex &mm->mmap_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RDS irq_context: 0 sk_lock-AF_RDS slock-AF_RDS irq_context: 0 sk_lock-AF_RDS batched_entropy_u16.lock irq_context: 0 sk_lock-AF_RDS once_lock irq_context: 0 sk_lock-AF_RDS once_lock crngs.lock irq_context: 0 sk_lock-AF_RDS pool_lock#2 irq_context: 0 sk_lock-AF_RDS &obj_hash[i].lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RDS rcu_node_0 irq_context: 0 sk_lock-AF_RDS &rq->__lock irq_context: 0 sk_lock-AF_RDS &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RDS rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_RDS rcu_read_lock rcu_node_0 irq_context: 0 slock-AF_RDS irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 tracepoints_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu irq_context: 0 tracepoints_mutex &x->wait#3 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex key#25 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 text_mutex text_mutex.wait_lock irq_context: 0 text_mutex &rq->__lock irq_context: 0 text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 text_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_node_0 irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &data->mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &local->iflist_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &local->iflist_mtx hrtimer_bases.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &local->iflist_mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &local->iflist_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &local->iflist_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex 
&rdev->wiphy.mtx &local->mtx &local->filter_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &rq->__lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &data->mutex irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq &(&local->scan_work)->timer irq_context: softirq &(&local->scan_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&local->scan_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&local->scan_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx pool_lock#2 irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &list->lock#16 irq_context: softirq rcu_read_lock &local->rx_path_lock &rdev->mgmt_registrations_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&wdev->mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &list->lock#16 irq_context: softirq rcu_read_lock &rdev->bss_lock irq_context: softirq rcu_read_lock &rdev->bss_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &rdev->bss_lock pool_lock#2 irq_context: softirq rcu_read_lock &rdev->bss_lock krc.lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_PPPOX irq_context: softirq &(&local->scan_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&local->scan_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &rdev->bss_lock &c->lock irq_context: 0 sk_lock-AF_INET fs_reclaim &rq->__lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex deferred_probe_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock 
irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock irq_context: softirq rcu_read_lock &rdev->bss_lock krc.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &rdev->bss_lock krc.lock &base->lock irq_context: softirq rcu_read_lock &rdev->bss_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock 
&mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 &u->iolock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC init_mm.page_table_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: softirq &(&local->scan_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &child->perf_event_mutex rcu_node_0 irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &r->producer_lock#3 irq_context: softirq &(&conn->disc_work)->timer irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#6 irq_context: 0 
(wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) &n->list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 misc_mtx remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 misc_mtx remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 
rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient &rq->__lock irq_context: 0 (wq_completion)events_power_efficient &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx kfence_freelist_lock irq_context: softirq rcu_read_lock &rdev->bss_lock &n->list_lock irq_context: softirq rcu_read_lock &rdev->bss_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_UNIX &mm->mmap_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &n->list_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#6 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#6 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock pool_lock#2 irq_context: 0 
&type->i_mutex_dir_key#6 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#6 &xa->xa_lock#13 irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock &wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->filter_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx tk_core.seq.seqcount irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx 
hrtimer_bases.lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rdev->wiphy_work_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rdev->wiphy_work_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &list->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: 0 ebt_mutex ebt_mutex.wait_lock irq_context: 0 ebt_mutex.wait_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rlock-AF_PPPOX irq_context: 0 sb_writers#8 tomoyo_ss rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock crngs.lock irq_context: 0 rcu_read_lock rcu_read_lock &____s->seqcount#5 
irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &meta->lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock (console_sem).lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock 
rcu_read_lock &r->producer_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq (&n->timer) &n->lock pool_lock#2 irq_context: softirq (&n->timer) &c->lock irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 delayed_uprobe_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) &n->lock &c->lock irq_context: 0 cb_lock remove_cache_srcu &rq->__lock irq_context: 0 cb_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events &obj_hash[i].lock irq_context: 0 cb_lock (console_sem).lock irq_context: 0 cb_lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock console_lock console_srcu console_owner irq_context: 0 cb_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq (&n->timer) icmp_global.lock irq_context: softirq (&n->timer) icmp_global.lock batched_entropy_u8.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &ul->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET batched_entropy_u32.lock irq_context: softirq (&n->timer) k-slock-AF_INET &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&n->timer) &dir->lock irq_context: softirq (&n->timer) stock_lock irq_context: softirq (&n->timer) &____s->seqcount irq_context: softirq (&n->timer) rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &data->mutex irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->iflist_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->iflist_mtx hrtimer_bases.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->iflist_mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cb_lock 
&rdev->wiphy.mtx &local->mtx &local->iflist_mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->filter_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx nl_table_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &data->mutex irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &data->mutex irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex stock_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock irq_context: 0 
vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex bus_type_sem irq_context: 0 vlan_ioctl_mutex rtnl_mutex input_pool.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex failover_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 &bat_priv->bat_v.ogm_buff_mutex &rq->__lock irq_context: 0 proto_tab_lock irq_context: 0 proto_tab_lock pool_lock#2 irq_context: 0 proto_tab_lock &dir->lock irq_context: 0 proto_tab_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_NFC irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &obj_hash[i].lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu irq_context: 0 cb_lock genl_mutex remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock genl_mutex &bat_priv->tp_list_lock irq_context: 0 cb_lock genl_mutex &bat_priv->tp_list_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &bat_priv->tp_list_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex kthread_create_lock irq_context: 0 cb_lock genl_mutex &x->wait irq_context: 0 &bat_priv->tp_list_lock irq_context: 0 (&tp_vars->timer) irq_context: 0 &tp_vars->unacked_lock irq_context: 0 sk_lock-AF_AX25 irq_context: 0 sk_lock-AF_AX25 slock-AF_AX25 irq_context: 0 slock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 slock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 clock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 ax25_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &list->lock#33 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 rlock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 wlock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_AX25 irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &base->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock 
&pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock irq_context: 0 sk_lock-AF_AX25 ax25_uid_lock irq_context: 0 sk_lock-AF_ROSE irq_context: 0 sk_lock-AF_ROSE slock-AF_ROSE irq_context: 0 sk_lock-AF_ROSE rose_node_list_lock irq_context: 0 slock-AF_ROSE irq_context: 0 &type->i_mutex_dir_key#5 pgd_lock irq_context: 0 &type->i_mutex_dir_key#5 stock_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#5 key irq_context: 0 &type->i_mutex_dir_key#5 pcpu_lock irq_context: 0 &type->i_mutex_dir_key#5 percpu_counters_lock irq_context: 0 &type->i_mutex_dir_key#5 pcpu_lock stock_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock stock_lock irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK irq_context: 0 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 sk_lock-AF_VSOCK vsock_table_lock batched_entropy_u32.lock irq_context: 0 sk_lock-AF_VSOCK &rq->__lock irq_context: 0 sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_VSOCK irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key#26 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_node_0 irq_context: 0 
&type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 rtnl_mutex device_links_lock &rq->__lock irq_context: 0 rtnl_mutex device_links_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &cfs_rq->removed.lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pcpu_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss pgd_lock irq_context: 0 tomoyo_ss stock_lock irq_context: 0 tomoyo_ss key irq_context: 0 tomoyo_ss pcpu_lock irq_context: 0 tomoyo_ss percpu_counters_lock irq_context: 0 
tomoyo_ss pcpu_lock stock_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &po->bind_lock irq_context: 0 fanout_mutex irq_context: 0 fanout_mutex &rq->__lock irq_context: 0 sb_writers#4 sb_internal quarantine_lock irq_context: 0 clock-AF_PACKET irq_context: 0 elock-AF_PACKET irq_context: 0 &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP &sk->sk_lock.wq#2 irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP &sk->sk_lock.wq#2 irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP &sk->sk_lock.wq#2 &p->pi_lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP &sk->sk_lock.wq#2 &p->pi_lock &rq->__lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP &sk->sk_lock.wq#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fp->aux->used_maps_mutex &map->owner.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &aux->poke_mutex irq_context: 0 &xt[i].mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &aux->poke_mutex irq_context: 0 sb_writers#4 sb_internal &pcp->lock &zone->lock irq_context: 0 &aux->poke_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 map_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &n->list_lock irq_context: 0 sk_lock-AF_TIPC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &rnp->exp_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex 
irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock 
&pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->filter_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx tk_core.seq.seqcount irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx &rq->__lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock deferred_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock nl_table_wait.lock 
irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->filter_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx tk_core.seq.seqcount irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy4 
(work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)phy4 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 (console_sem).lock irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock/1 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key bit_wait_table + i irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle kfence_freelist_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#13 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 stock_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock &wq#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#13 &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#13 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#13 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#13 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 irq_context: 0 sb_writers 
&sb->s_type->i_mutex_key#4 lock#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 key#9 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sem->wait_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock key#10 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 
sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 
nf_sockopt_mutex &cfs_rq->removed.lock irq_context: 0 nf_sockopt_mutex &obj_hash[i].lock irq_context: 0 nf_sockopt_mutex pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 pool_lock#2 irq_context: 
softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &____s->seqcount#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &____s->seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key krc.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 &dir->lock#2 quarantine_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &base->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu pgd_lock irq_context: 0 remove_cache_srcu stock_lock irq_context: 0 remove_cache_srcu key irq_context: 0 remove_cache_srcu pcpu_lock irq_context: 0 remove_cache_srcu percpu_counters_lock irq_context: 0 remove_cache_srcu pcpu_lock stock_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work quarantine_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &sctp_ep_hashtable[i].lock irq_context: 0 sk_lock-AF_INET6 sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &c->lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock clock-AF_INET irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 
slock-AF_INET elock-AF_INET irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq net/ipv4/devinet.c:474 irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &c->lock 
irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex pack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 rlock-AF_BLUETOOTH irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 quarantine_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#5 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG quarantine_lock irq_context: softirq &(&hctx->run_work)->timer irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex free_vmap_area_lock &base->lock irq_context: 0 &xt[i].mutex free_vmap_area_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 
&tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &sbi->s_writepages_rwsem &pcp->lock &zone->lock irq_context: softirq rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_ALG batched_entropy_u8.lock irq_context: 0 sk_lock-AF_ALG kfence_freelist_lock irq_context: 0 sk_lock-AF_ALG &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
&handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#48 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 nlk_cb_mutex-NETFILTER irq_context: 0 nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 nlk_cb_mutex-NETFILTER &n->list_lock irq_context: 0 nlk_cb_mutex-NETFILTER &n->list_lock &c->lock irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock pool_lock#2 irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock rcu_node_0 irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock &rq->__lock irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh 
rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock &base->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &base->lock irq_context: 0 &mm->mmap_lock &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &cfs_rq->removed.lock irq_context: 0 &hdev->lock &n->list_lock irq_context: 0 &hdev->lock &n->list_lock &c->lock irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex quarantine_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; 
__asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 
(wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &ei->socket.wq.wait irq_context: 0 sk_lock-AF_TIPC &zone->lock irq_context: 0 sk_lock-AF_TIPC &____s->seqcount irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &sem->wait_lock irq_context: 0 sk_lock-AF_TIPC &sem->wait_lock irq_context: 0 sk_lock-AF_TIPC &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock hidp_sk_list.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC quarantine_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page) irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 hidp_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_BLUETOOTH irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &base->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#8 kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &meta->lock irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key 
irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex (work_completion)(&(&br->gc_work)->work) irq_context: 0 rtnl_mutex (&br->hello_timer) irq_context: 0 rtnl_mutex (&br->topology_change_timer) irq_context: 0 rtnl_mutex (&br->tcn_timer) irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &pmc->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key krc.lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &br->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex &br->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex net_dm_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex net_dm_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex &data->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex net_dm_mutex &c->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &data->lock irq_context: softirq rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &data->lock &base->lock irq_context: softirq rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 &data->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_INET6 &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &data->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &sk->sk_lock.wq irq_context: 0 slock-AF_INET &sk->sk_lock.wq irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &cfs_rq->removed.lock irq_context: softirq (&data->send_timer) irq_context: softirq (&data->send_timer) 
rcu_read_lock &pool->lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &data->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) nl_table_wait.lock irq_context: 0 &data->lock &obj_hash[i].lock irq_context: 0 &data->lock &base->lock irq_context: 0 &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) irq_context: softirq (&pool->idle_timer) &pool->lock/1 irq_context: softirq (&pool->idle_timer) &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 rtnl_mutex &data->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 &____s->seqcount#2 
irq_context: 0 &mm->mmap_lock sb_writers#4 &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: hardirq rcu_state.barrier_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 misc_mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 misc_mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex &meta->lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &meta->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &lo->lo_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: 0 pidmap_lock &obj_hash[i].lock irq_context: 0 pidmap_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_node_0 irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &data->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 tracepoints_mutex &n->list_lock irq_context: 0 tracepoints_mutex &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock &data->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) 
: "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) 
+ 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &____s->seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &____s->seqcount#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &pcp->lock &zone->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock sb_pagefaults 
mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex fs_reclaim irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex stock_lock irq_context: 0 &nft_net->commit_mutex &c->lock irq_context: 0 &nft_net->commit_mutex &n->list_lock irq_context: 0 &nft_net->commit_mutex &n->list_lock &c->lock irq_context: 0 &nft_net->commit_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex batched_entropy_u32.lock irq_context: 0 &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &nft_net->commit_mutex &____s->seqcount irq_context: 0 &nft_net->commit_mutex rcu_read_lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex &rnp->exp_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rnp->exp_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rnp->exp_wq[0] irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem 
&nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock 
&rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#13 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#13 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex 
nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 kn->active#21 fs_reclaim &rq->__lock irq_context: 0 kn->active#21 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER (console_sem).lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_node_0 irq_context: 0 nfnl_subsys_nftables rcu_read_lock rcu_node_0 irq_context: 0 nfnl_subsys_nftables rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rnp->exp_wq[1] irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &app->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->join_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->periodic_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &list->lock#11 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->join_timer)#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &app->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &list->lock#12 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock &c->lock irq_context: 0 sb_writers#3 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 
&nft_net->commit_mutex &p->pi_lock irq_context: 0 &nft_net->commit_mutex &p->pi_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem stock_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) pool_lock#2 irq_context: 0 (wq_completion)phy15 irq_context: 0 (wq_completion)phy15 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy15 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)phy16 irq_context: 0 (wq_completion)phy16 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy16 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu pgd_lock irq_context: 0 &fsnotify_mark_srcu stock_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock pool_lock#2 irq_context: 0 &fsnotify_mark_srcu key irq_context: 0 &fsnotify_mark_srcu pcpu_lock irq_context: 0 &fsnotify_mark_srcu percpu_counters_lock irq_context: 0 &fsnotify_mark_srcu pcpu_lock stock_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &c->lock irq_context: 0 vlan_ioctl_mutex 
rtnl_mutex lweventlist_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex.wait_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &c->lock irq_context: 0 kn->active#21 &n->list_lock irq_context: 0 kn->active#21 &n->list_lock &c->lock irq_context: softirq (&pool->idle_timer) &pool->lock irq_context: softirq (&pool->idle_timer) &pool->lock &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &n->list_lock &c->lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 
fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh &data->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_PACKET init_mm.page_table_lock irq_context: 0 sk_lock-AF_PACKET batched_entropy_u32.lock crngs.lock irq_context: 0 rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &base->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock &obj_hash[i].lock pool_lock irq_context: 0 sock_diag_mutex irq_context: 0 sock_diag_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &journal->j_wait_reserved irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &data->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex rcu_node_0 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy17 irq_context: 0 (wq_completion)phy17 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy17 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET text_mutex text_mutex.wait_lock irq_context: 0 sk_lock-AF_PACKET text_mutex &rq->__lock irq_context: 0 sk_lock-AF_PACKET text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
sb_internal jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &data->lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rcu_state.barrier_mutex pool_lock#2 irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key &data->lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock &data->lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 lock link_idr_lock &n->list_lock irq_context: 0 lock link_idr_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &data->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock kfence_freelist_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &meta->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_VSOCK fs_reclaim irq_context: 0 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem 
proc_inum_ida.xa_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy18 irq_context: 0 (wq_completion)phy18 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy18 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK &mm->mmap_lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_VSOCK 
rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock &base->lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex &obj_hash[i].lock pool_lock irq_context: 0 &iint->mutex &meta->lock irq_context: 0 sb_writers#5 &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (t) irq_context: softirq (t) &obj_hash[i].lock irq_context: softirq (t) &base->lock irq_context: softirq (t) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ebt_mutex &cfs_rq->removed.lock irq_context: 0 ebt_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock (work_completion)(flush) irq_context: 0 rtnl_mutex lapb_list_lock &n->list_lock irq_context: 0 rtnl_mutex lapb_list_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) pool_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 sk_lock-AF_VSOCK &c->lock irq_context: 0 sk_lock-AF_VSOCK &n->list_lock irq_context: 0 sk_lock-AF_VSOCK &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock 
jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &pcp->lock &zone->lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work ima_keys_lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work pool_lock#2 irq_context: 0 (wq_completion)events_freezable &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex ipvs->sync_mutex fs_reclaim irq_context: 0 rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ipvs->sync_mutex &c->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex pool_lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex ipvs->sync_mutex stock_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex ipvs->sync_mutex &n->list_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex &dir->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-slock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex 
ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 rtnl_mutex ipvs->sync_mutex kthread_create_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &p->pi_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex &x->wait irq_context: 0 rtnl_mutex ipvs->sync_mutex &ipvs->sync_buff_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &ipvs->sync_lock irq_context: 0 &ipvs->sync_buff_lock irq_context: 0 &xt[i].mutex pgd_lock irq_context: 0 &xt[i].mutex key irq_context: 0 &xt[i].mutex pcpu_lock irq_context: 0 &xt[i].mutex percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->w) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&w->w) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ul->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock crngs.lock irq_context: 0 
rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#13 &n->list_lock irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#13 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &lock->wait_lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock irq_context: 0 
pernet_ops_rwsem &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 tasklist_lock &sighand->siglock &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-sk_lock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &table->hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &xa->xa_lock#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex &tbl->lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &data->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock 
&zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &____s->seqcount#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex pool_lock#2 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_PACKET rcu_node_0 irq_context: 0 sk_lock-AF_PACKET &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_PACKET &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_PACKET &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex stock_lock irq_context: 0 &xt[i].mutex pcpu_lock stock_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 remove_cache_srcu &base->lock irq_context: 0 remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &rq->__lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 sk_lock-AF_INET &data->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &tsk->futex_exit_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock hwsim_radio_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock hwsim_radio_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_state.exp_wake_mutex.wait_lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_wq[2] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock stock_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &c->lock irq_context: 0 sk_lock-AF_INET &sighand->siglock pool_lock#2 irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 fill_pool_map-wait-type-override 
pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &base->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &sch->q.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx 
rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 
rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#15 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &data->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &lock->wait_lock irq_context: 0 rtnl_mutex &list->lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount &p->pi_lock irq_context: 0 rcu_read_lock mount_lock 
mount_lock.seqcount &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 pernet_ops_rwsem wq_pool_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[0] irq_context: 0 (wq_completion)events free_ipc_work &pool->lock irq_context: 0 (wq_completion)events free_ipc_work &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &sch->q.lock irq_context: softirq rcu_read_lock &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&lapb->t1timer) &lapb->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex 
&bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &____s->seqcount irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pcpu_lock stock_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock 
irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu pool_lock#2 irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex.wait_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &data->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &data->lock &base->lock irq_context: 0 sk_lock-AF_INET6 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET remove_cache_srcu &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &n->list_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock &c->lock irq_context: 0 nfnl_subsys_ipset &rq->__lock irq_context: 0 nfnl_subsys_ipset &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 proto_tab_lock &c->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock 
fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 slock-AF_ALG irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 &rq->__lock irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 pool_lock#2 irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &data->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 rcu_read_lock rcu_node_0 irq_context: hardirq rcu_read_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &meta->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_node_0 irq_context: 0 cb_lock genl_mutex &data->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex _xmit_IPGRE pool_lock#2 irq_context: softirq rcu_callback &data->lock &obj_hash[i].lock irq_context: softirq rcu_callback &data->lock &base->lock irq_context: softirq rcu_callback &data->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex &rq->__lock irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 loop_validate_mutex &rq->__lock irq_context: 0 loop_validate_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &____s->seqcount#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &____s->seqcount irq_context: 0 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_node_0 irq_context: 0 sb_writers &dentry->d_lock irq_context: 0 sb_writers tomoyo_ss irq_context: 0 sb_writers tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#5 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &lruvec->lru_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock key#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_LOOPBACK#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fib_info_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
(inetaddr_chain).rwsem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 sb_writers tomoyo_ss &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 sb_writers &cfs_rq->removed.lock irq_context: 0 sb_writers &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &data->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 namespace_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex.wait_lock irq_context: 0 &type->s_umount_key#22/1 &p->pi_lock irq_context: 0 &type->s_umount_key#22/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#22/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sb_writers#4 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 
0 sb_writers#4 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh 
&hsr->seqnr_lock &data->lock irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 key#23 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 &xn->hash_lock irq_context: 0 &xn->hash_lock &rq->__lock irq_context: 0 &xn->hash_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xn->hash_lock fs_reclaim irq_context: 0 &xn->hash_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xn->hash_lock pool_lock#2 irq_context: 0 &xn->hash_lock &est->lock irq_context: 0 &xn->hash_lock &est->lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock &est->lock &base->lock irq_context: 0 &xn->hash_lock &est->lock &base->lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock (&est->timer) irq_context: 0 &xn->hash_lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock &base->lock irq_context: 0 &xn->hash_lock &base->lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock krc.lock irq_context: 0 &xn->hash_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 &bdi->wb_waitq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock 
&ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock
key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &wb->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &wb->list_lock 
&sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_pagefaults &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &base->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &c->lock irq_context: 0 &sbi->s_writepages_rwsem 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#19 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#19 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem 
nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &bgl->locks[i].lock irq_context: 0 sk_lock-AF_INET6 &sw_ctx_rx->wq irq_context: 0 &fsnotify_mark_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 namespace_sem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ifibss->timer) irq_context: softirq (&ifibss->timer) &rdev->wiphy_work_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#5 &lruvec->lru_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#5 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rlock-AF_NETLINK irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 
(wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 &lruvec->lru_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 kfence_freelist_lock 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 &u->iolock &pcp->lock &zone->lock irq_context: 0 &u->iolock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex deferred_probe_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex deferred_probe_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &base->lock irq_context: 0 &xt[i].mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock batched_entropy_u8.lock irq_context: 0 cb_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &ht->mutex quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_node_0 irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &hdev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &hdev->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &hdev->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &hdev->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &hdev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &msk->pm.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events 
(work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex fib_info_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 krc.lock fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq (&peer->timer_retransmit_handshake) &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex quarantine_lock irq_context: 0 &xt[i].mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_node_0 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &data->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &rq->__lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &nf_conntrack_locks[i] irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &____s->seqcount#7 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &nf_conntrack_locks[i]/1 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rcu_read_lock &nf_nat_locks[i] irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override 
&n->list_lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &c->lock irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink rlock-AF_NETLINK irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &data->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink &lock->wait_lock irq_context: 0 nfnl_subsys_ctnetlink &rq->__lock irq_context: 0 nfnl_subsys_ctnetlink &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &meta->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 pgd_lock irq_context: 0 &mm->mmap_lock sb_writers#4 stock_lock irq_context: 0 &mm->mmap_lock sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 key irq_context: 0 &mm->mmap_lock sb_writers#4 pcpu_lock irq_context: 0 &mm->mmap_lock sb_writers#4 percpu_counters_lock irq_context: 0 &mm->mmap_lock sb_writers#4 pcpu_lock stock_lock irq_context: 0 &hdev->lock &lock->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &iint->mutex sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 &iint->mutex sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq (&app->periodic_timer) &app->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->periodic_timer) &app->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock 
slock-AF_INET/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &data->lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 kn->active#59 fs_reclaim irq_context: 0 kn->active#59 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#59 &c->lock irq_context: 0 kn->active#59 stock_lock irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpuset_hotplug_work irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpuset_hotplug_work &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpuset_hotplug_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 (wq_completion)cpuset_migrate_mm irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &x->wait#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &pn->l2tp_tunnel_idr_lock irq_context: 0 &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock irq_context: 0 sk_lock-AF_PPPOX fs_reclaim irq_context: 0 sk_lock-AF_PPPOX fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PPPOX fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_PPPOX pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &tunnel->hlist_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &tunnel->hlist_lock &pn->l2tp_session_hlist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &tunnel->hlist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->l2tp_session_hlist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock 
&base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &list->lock#34 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &ps->sk_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 cpu_hotplug_lock irq_context: 0 (wq_completion)l2tp irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &tunnel->hlist_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: softirq rcu_callback rlock-AF_PPPOX irq_context: softirq rcu_callback wlock-AF_PPPOX irq_context: softirq rcu_callback clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex __ip_vs_mutex &rq->__lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 
&sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_ALG &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &data->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock krc.lock &base->lock irq_context: 0 rtnl_mutex &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock &cfs_rq->removed.lock irq_context: 0 file_rwsem &cfs_rq->removed.lock irq_context: 0 file_rwsem &obj_hash[i].lock irq_context: 0 file_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock 
&wb->work_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_node_0 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#7 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 
&dev_addr_list_lock_key#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key#2 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key#2 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#7 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#7 _xmit_IPGRE irq_context: 0 rtnl_mutex team->team_lock_key#7 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 _xmit_IPGRE pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 lock irq_context: 0 rtnl_mutex team->team_lock_key#7 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#7 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#7 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key#2/1 _xmit_IPGRE irq_context: 0 rtnl_mutex team->team_lock_key#7 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu 
quarantine_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_NETROM irq_context: 0 sk_lock-AF_NETROM slock-AF_NETROM irq_context: 0 sk_lock-AF_NETROM ax25_uid_lock irq_context: 0 sk_lock-AF_NETROM nr_list_lock irq_context: 0 slock-AF_NETROM irq_context: 0 nr_list_lock irq_context: 0 sk_lock-AF_NETROM pool_lock#2 irq_context: 0 sk_lock-AF_NETROM &list->lock#35 irq_context: 0 sk_lock-AF_NETROM &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &base->lock irq_context: 0 sk_lock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &rq->__lock irq_context: 0 sk_lock-AF_NETROM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM &data->lock irq_context: 0 sk_lock-AF_NETROM &ei->socket.wq.wait irq_context: softirq net/netrom/nr_loopback.c:18 irq_context: softirq net/netrom/nr_loopback.c:18 &list->lock#35 irq_context: softirq net/netrom/nr_loopback.c:18 nr_list_lock irq_context: softirq net/netrom/nr_loopback.c:18 pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 &dir->lock irq_context: softirq net/netrom/nr_loopback.c:18 &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &c->lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &list->lock#35 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &base->lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &data->lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rlock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/netrom/nr_loopback.c:18 &base->lock irq_context: softirq net/netrom/nr_loopback.c:18 &base->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 &data->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &list->lock#35 irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &data->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM wlock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &list->lock#22 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#9 irq_context: softirq net/netrom/nr_loopback.c:18 &c->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN irq_context: 0 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &rq->__lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN fs_reclaim irq_context: 0 sk_lock-AF_CAN fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_CAN &c->lock irq_context: 0 sk_lock-AF_CAN pool_lock#2 irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &____s->seqcount irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &c->lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &net->can.rcvlists_lock irq_context: 0 sk_lock-AF_CAN &priv->lock irq_context: 0 sk_lock-AF_CAN &priv->j1939_socks_lock irq_context: 0 slock-AF_CAN irq_context: 0 sk_lock-AF_CAN &jsk->sk_session_queue_lock irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock irq_context: 0 sk_lock-AF_CAN &list->lock#36 irq_context: 0 sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN &priv->active_session_list_lock irq_context: 0 sk_lock-AF_CAN &rq->__lock irq_context: 0 sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN hrtimer_bases.lock irq_context: 0 
sk_lock-AF_CAN hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_CAN hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &priv->lock irq_context: softirq rcu_read_lock rcu_read_lock &priv->active_session_list_lock irq_context: softirq rcu_read_lock rcu_read_lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock &priv->j1939_socks_lock irq_context: softirq rcu_read_lock rcu_read_lock &list->lock#36 irq_context: softirq &priv->j1939_socks_lock irq_context: softirq &list->lock#36 irq_context: softirq rcu_read_lock rcu_read_lock &priv->j1939_socks_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &priv->j1939_socks_lock rlock-AF_CAN irq_context: softirq rcu_read_lock rcu_read_lock &jsk->sk_session_queue_lock irq_context: 0 sk_lock-AF_CAN &data->lock irq_context: 0 sk_lock-AF_CAN &jsk->waitq irq_context: 0 rlock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->j1939_socks_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &net->can.rcvlists_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &priv->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#7 &c->lock irq_context: 0 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock &priv->j1939_socks_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_CAN &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_CAN &____s->seqcount#2 irq_context: 0 sk_lock-AF_CAN &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_node_0 irq_context: 0 misc_mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&sk->sk_timer)#2 irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM irq_context: softirq (&sk->sk_timer)#2 nr_list_lock irq_context: softirq (&sk->sk_timer)#2 &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 wlock-AF_NETROM irq_context: softirq (&sk->sk_timer)#2 &list->lock#22 irq_context: softirq (&sk->sk_timer)#2 rlock-AF_NETROM irq_context: softirq (&sk->sk_timer)#2 &base->lock irq_context: softirq (&sk->sk_timer)#2 &base->lock &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 &data->lock irq_context: softirq (&sk->sk_timer)#2 pool_lock#2 irq_context: softirq (&sk->sk_timer)#2 &____s->seqcount irq_context: softirq (&sk->sk_timer)#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock 
tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 rtnl_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &list->lock#16 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx 
&local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#6 
&root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock key#15 irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM &base->lock irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 raw_notifier_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 
(wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &n->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 raw_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN clock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAN irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu irq_context: 0 &mm->mmap_lock sb_writers#4 
remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 &base->lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock &c->lock irq_context: softirq (&mp->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock deferred_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&p->timer) irq_context: softirq (&p->timer) &br->multicast_lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_lock irq_context: softirq (&p->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock &____s->seqcount irq_context: softirq (&p->timer) &br->multicast_lock deferred_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock nl_table_lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&p->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&p->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock &____s->seqcount#2 irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock &base->lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&p->timer) &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&p->timer) &br->multicast_lock &c->lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
(&p->timer) &br->multicast_lock &____s->seqcount#2 irq_context: softirq (&p->timer) &br->multicast_lock &____s->seqcount irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex act_mod_lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock pool_lock#2 irq_context: 0 rtnl_mutex &p->tcfa_lock irq_context: 0 rtnl_mutex &p->tcfa_lock &(to_police(*a)->tcfp_lock) irq_context: 0 rtnl_mutex &p->tcfa_lock &(to_police(*a)->tcfp_lock) tk_core.seq.seqcount irq_context: 0 rtnl_mutex &p->tcfa_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex flow_indr_block_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers tomoyo_ss &n->list_lock irq_context: 0 sb_writers tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 rtnl_mutex &p->tcfa_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &fq->mq_flush_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 &n->list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 
mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_writers#4 kfence_freelist_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_node_0 irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &meta->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem netns_bpf_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &data->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while 
(0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu 
&n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) 
: "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->lock irq_context: 0 &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock 
&rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pcpu_lock stock_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &n->list_lock &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock console_owner_lock irq_context: 0 rtnl_mutex &br->lock console_owner irq_context: 0 rtnl_mutex &tn->lock &rq->__lock irq_context: 0 rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &im->lock irq_context: 0 &smc->clcsock_release_lock irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock nf_sockopt_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) irq_context: 0 (wq_completion)bond8 
(work_completion)(&(&bond->arp_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->arp_work)->timer irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss &base->lock irq_context: 0 sb_writers#8 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond4#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu batched_entropy_u8.lock irq_context: 0 &fsnotify_mark_srcu kfence_freelist_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10 
irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &base->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &rq->__lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->lock fs_reclaim irq_context: 0 rtnl_mutex &block->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->lock &c->lock irq_context: 0 rtnl_mutex &block->lock &n->list_lock irq_context: 0 rtnl_mutex &block->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &block->lock pool_lock#2 irq_context: 0 rtnl_mutex cls_mod_lock irq_context: 0 rtnl_mutex &block->lock &____s->seqcount irq_context: 0 rtnl_mutex &block->lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &block->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex 
&block->lock nl_table_lock irq_context: 0 rtnl_mutex &block->lock nl_table_wait.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override 
&n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) &dir->lock#2 irq_context: softirq (&n->timer) &ul->lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#22/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->s_umount_key#22/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 sock_diag_mutex fs_reclaim irq_context: 0 sock_diag_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 
0 sock_diag_mutex rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock &ndev->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&n->timer) &data->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq (&n->timer) quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
pernet_ops_rwsem rtnl_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &n->list_lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &n->list_lock &c->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 pool_lock#2 irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 fanout_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 fanout_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sock_diag_mutex rcu_read_lock &c->lock irq_context: 0 sock_diag_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#37 irq_context: 0 proto_tab_lock &n->list_lock irq_context: 0 proto_tab_lock &n->list_lock &c->lock irq_context: 0 krc.lock &obj_hash[i].lock irq_context: 0 
krc.lock &base->lock irq_context: 0 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &____s->seqcount irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 rcu_read_lock pgd_lock irq_context: 0 sb_writers#4 rcu_read_lock stock_lock irq_context: 0 sb_writers#4 rcu_read_lock key irq_context: 0 sb_writers#4 rcu_read_lock pcpu_lock irq_context: 0 sb_writers#4 rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#4 rcu_read_lock pcpu_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock quarantine_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 
0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rtnl_mutex pcpu_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &queue->rskq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock _xmit_NETROM irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET irq_context: 0 rtnl_mutex sk_lock-AF_INET slock-AF_INET irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET pool_lock#2 irq_context: 0 rtnl_mutex 
sk_lock-AF_INET &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET rcu_read_lock &im->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock pool_lock#2 irq_context: 0 rtnl_mutex slock-AF_INET irq_context: 0 rtnl_mutex rcu_read_lock &im->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &base->lock irq_context: 0 sb_writers#4 sb_internal &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex krc.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex 
sk_lock-AF_INET &im->lock &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 
(wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount 
irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#7 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx 
&wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 tomoyo_ss &meta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rnp->exp_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &list->lock#15 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx 
rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &rq->__lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock irq_context: 0 sk_lock-AF_TIPC &list->lock#38 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &data->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &list->lock#38 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &data->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &c->lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &n->list_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC rcu_node_0 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC quarantine_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
sk_lock-AF_TIPC rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC tk_core.seq.seqcount irq_context: 0 sk_lock-AF_TIPC &list->lock#5 irq_context: 0 sk_lock-AF_TIPC batched_entropy_u8.lock irq_context: 0 sk_lock-AF_TIPC kfence_freelist_lock irq_context: 0 sk_lock-AF_TIPC &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &data->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &data->lock &base->lock irq_context: 0 sk_lock-AF_TIPC &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: softirq (&n->timer) &data->lock &obj_hash[i].lock irq_context: softirq (&n->timer) &data->lock &base->lock irq_context: softirq (&n->timer) &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &____s->seqcount#2 irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &data->mutex irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock 
&obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &____s->seqcount irq_context: 0 sb_writers#4 sb_internal &cfs_rq->removed.lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu &c->lock irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu &n->list_lock irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) icmp_global.lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 &group->mark_mutex batched_entropy_u8.lock irq_context: 0 &group->mark_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &meta->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &data->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock 
irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->filter_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle bit_wait_table + i irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 link_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock &____s->seqcount irq_context: 0 mapping.invalidate_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock stock_lock irq_context: 0 mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 mapping.invalidate_lock lock#4 irq_context: 0 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 mapping.invalidate_lock &rq->__lock irq_context: 0 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: 
&qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &data->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock mapping.invalidate_lock irq_context: 0 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock &dd->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock stock_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 
sk_lock-AF_INET6 rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &c->lock irq_context: softirq (&n->timer) &n->lock &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 mapping.invalidate_lock &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock &folio_wait_table[i] &p->pi_lock irq_context: 0 mapping.invalidate_lock &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 mapping.invalidate_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem 
&____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 &iint->mutex &n->list_lock irq_context: 0 sb_writers#4 &iint->mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 tomoyo_ss &cfs_rq->removed.lock irq_context: softirq (&n->timer) &n->list_lock irq_context: softirq (&n->timer) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &data->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &data->lock &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem &rq->__lock irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 &base->lock irq_context: 0 &mm->mmap_lock sb_writers#4 &base->lock 
&obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#7 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock quarantine_lock irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock pool_lock#2 irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock 
sb_pagefaults mapping.invalidate_lock &sem->wait_lock irq_context: 0 mapping.invalidate_lock &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &meta->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &data->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &f->f_owner.lock rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET6 &f->f_owner.lock rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 rtnl_mutex &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex (console_sem).lock irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg2#5 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock &c->lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 &____s->seqcount#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 &____s->seqcount irq_context: 0 &chan->lock/1 irq_context: 0 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 irq_context: 0 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &chan->lock/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 bt_proto_lock sco_sk_list.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 sco_sk_list.lock irq_context: 0 &root->kernfs_rwsem rcu_node_0 irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex bpf_devs_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &base->lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &conn->lock#2 irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &obj_hash[i].lock irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock &obj_hash[i].lock irq_context: 0 &hdev->lock slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &ei->socket.wq.wait irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &conn->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO 
rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem irq_context: 0 &rdma_nl_types[idx].sem devices_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock stock_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 &rq->__lock irq_context: 0 (wq_completion)bond8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &cfs_rq->removed.lock irq_context: 0 
(wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock key#22 irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock key#22 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex irq_context: 0 &smc->clcsock_release_lock rtnl_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET &mm->mmap_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET &mm->mmap_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-slock-AF_INET irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu &obj_hash[i].lock pool_lock irq_context: softirq (&n->timer) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &fsnotify_mark_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock hwsim_radio_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &data->lock irq_context: 0 rtnl_mutex rcu_read_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond11 
(work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem &base->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#12 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#12 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&policy->timer) irq_context: softirq (&policy->timer) &policy->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &im->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock irq_context: 0 
&sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &c->lock irq_context: 0 sb_writers#8 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 tomoyo_ss fill_pool_map-wait-type-override pool_lock irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: softirq (&ndev->rs_timer) batched_entropy_u8.lock crngs.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 tracepoints_mutex tracepoints_mutex.wait_lock irq_context: 0 tracepoints_mutex.wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET6 &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex oom_adj_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex.wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET crngs.lock irq_context: 0 
sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET &asoc->wait irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &base->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rlock-AF_PACKET irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dgram_lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 krc.lock &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) &n->lock &n->list_lock irq_context: softirq (&n->timer) &n->lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 dgram_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET &c->lock irq_context: 0 &p->alloc_lock rcu_read_lock &p->pi_lock irq_context: 0 &p->alloc_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_sockopt_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock pcpu_alloc_mutex irq_context: 0 cb_lock pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock pcpu_alloc_mutex.wait_lock irq_context: 0 cb_lock crngs.lock irq_context: 0 cb_lock ovs_mutex irq_context: 0 cb_lock ovs_mutex fs_reclaim irq_context: 0 cb_lock ovs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock ovs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock ovs_mutex &c->lock irq_context: 0 cb_lock ovs_mutex &n->list_lock irq_context: 0 cb_lock ovs_mutex &n->list_lock &c->lock irq_context: 0 cb_lock ovs_mutex pool_lock#2 irq_context: 0 cb_lock ovs_mutex pcpu_alloc_mutex irq_context: 0 cb_lock ovs_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock ovs_mutex stock_lock irq_context: 0 cb_lock ovs_mutex stack_depot_init_mutex irq_context: 0 cb_lock ovs_mutex crngs.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex fs_reclaim irq_context: 0 cb_lock ovs_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock ovs_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock 
irq_context: 0 cb_lock ovs_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 cb_lock ovs_mutex rtnl_mutex net_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &tn->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &x->wait#9 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &k->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex gdp_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex bus_type_sem irq_context: 0 cb_lock ovs_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &____s->seqcount irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex &dev->power.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex subsys mutex#17 irq_context: 0 cb_lock ovs_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &dir->lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &n->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex dev_base_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex input_pool.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock 
irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &tbl->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex stock_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex sysctl_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex nl_table_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex failover_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &pnettable->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex (console_sem).lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner console_owner_lock 
irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex ovs_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex _xmit_ETHER irq_context: 0 cb_lock ovs_mutex.wait_lock irq_context: 0 cb_lock nl_table_lock irq_context: 0 cb_lock nl_table_wait.lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock ovs_mutex pcpu_lock irq_context: 0 cb_lock ovs_mutex &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex krc.lock irq_context: 0 cb_lock ovs_mutex &dir->lock#2 irq_context: 0 cb_lock ovs_mutex (console_sem).lock irq_context: 0 cb_lock ovs_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock ovs_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock ovs_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock ovs_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock ovs_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock pcpu_lock irq_context: 0 cb_lock &data->lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond11 &rq->__lock irq_context: 0 (wq_completion)bond11 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 kn->active#60 fs_reclaim irq_context: 0 kn->active#60 fs_reclaim &rq->__lock 
irq_context: 0 kn->active#60 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#60 &c->lock irq_context: 0 kn->active#60 stock_lock irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 kn->active#60 &n->list_lock irq_context: 0 kn->active#60 &n->list_lock &c->lock irq_context: 0 kn->active#60 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem 
cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 &rq->__lock irq_context: 0 (wq_completion)bond10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock nl_table_wait.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_UNIX fs_reclaim irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_UNIX fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_UNIX fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_UNIX fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_UNIX fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_UNIX fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_UNIX free_vmap_area_lock irq_context: 0 sk_lock-AF_UNIX vmap_area_lock irq_context: 0 sk_lock-AF_UNIX &____s->seqcount irq_context: 0 sk_lock-AF_UNIX stock_lock irq_context: 0 sk_lock-AF_UNIX &c->lock irq_context: 0 sk_lock-AF_UNIX pcpu_alloc_mutex irq_context: 0 sk_lock-AF_UNIX pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_UNIX &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX &rq->__lock irq_context: 0 sk_lock-AF_UNIX &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_UNIX pack_mutex irq_context: 0 sk_lock-AF_UNIX batched_entropy_u32.lock irq_context: 0 sk_lock-AF_UNIX text_mutex irq_context: 0 sk_lock-AF_UNIX text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_UNIX &fp->aux->used_maps_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq rcu_state.barrier_lock fill_pool_map-wait-type-override &n->list_lock irq_context: hardirq rcu_state.barrier_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) 
ovs_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_UNIX &n->list_lock irq_context: 0 sk_lock-AF_UNIX &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &obj_hash[i].lock pool_lock irq_context: 0 ebt_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 ebt_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 &data->lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 pool_lock#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock &conn->chan_lock &rq->__lock irq_context: 0 &hdev->lock &conn->chan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem &bgl->locks[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex ima_extend_list_mutex.wait_lock irq_context: 0 &iint->mutex ima_extend_list_mutex.wait_lock irq_context: 0 &iint->mutex &p->pi_lock irq_context: 0 &iint->mutex &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &list->lock#39 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&list->lock#39 rlock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex team->team_lock_key#10 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &meta->lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock &rq->__lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#11 irq_context: 0 rtnl_mutex team->team_lock_key#11 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#11 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex __ip_vs_mutex irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 rlock-AF_INET6 irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &f->f_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &net->smc.mutex_fback_rsn irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_QIPCRTR irq_context: 0 sk_lock-AF_QIPCRTR slock-AF_QIPCRTR irq_context: 0 sk_lock-AF_QIPCRTR &rq->__lock irq_context: 0 sk_lock-AF_QIPCRTR &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_QIPCRTR irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&asoc->timers[i]) irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 pool_lock#2 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &list->lock#21 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &data->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle key#4 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR slock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR clock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rlock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_QIPCRTR irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 
batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex remove_cache_srcu irq_context: 0 cb_lock genl_mutex rfkill_global_mutex remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 cb_lock &rdev->wiphy.mtx batched_entropy_u8.lock irq_context: 0 cb_lock &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override 
rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock 
&vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock stock_lock irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 br_ioctl_mutex rtnl_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)phy19 irq_context: 0 (wq_completion)phy19 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy19 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex stock_lock irq_context: 0 br_ioctl_mutex rtnl_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex crngs.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 br_ioctl_mutex rtnl_mutex net_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &tn->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &x->wait#9 irq_context: 0 br_ioctl_mutex rtnl_mutex &k->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex gdp_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &____s->seqcount#2 
irq_context: 0 br_ioctl_mutex rtnl_mutex &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex bus_type_sem irq_context: 0 br_ioctl_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &dev->power.lock irq_context: 0 br_ioctl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &dir->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 br_ioctl_mutex rtnl_mutex dev_base_lock irq_context: 0 br_ioctl_mutex rtnl_mutex input_pool.lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &tbl->lock irq_context: 0 br_ioctl_mutex rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex failover_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &pnettable->lock irq_context: 0 br_ioctl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &base->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&(&br->gc_work)->work) irq_context: 0 br_ioctl_mutex rtnl_mutex kernfs_idr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex 
rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 br_ioctl_mutex rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 br_ioctl_mutex rtnl_mutex cpu_hotplug_lock irq_context: 0 br_ioctl_mutex rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 br_ioctl_mutex rtnl_mutex bpf_devs_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock krc.lock irq_context: 0 br_ioctl_mutex rtnl_mutex class irq_context: 0 br_ioctl_mutex rtnl_mutex (&tbl->proxy_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex &base->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ul->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &net->xdp.lock irq_context: 0 br_ioctl_mutex rtnl_mutex krc.lock irq_context: 0 br_ioctl_mutex rtnl_mutex mirred_list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &nft_net->commit_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ent->pde_unload_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ndev->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_query_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_report_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip4_mc_router_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip4_other_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip4_own_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip6_mc_router_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip6_other_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip6_own_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex &pnn->pndevs.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &pnn->routes.lock irq_context: 0 br_ioctl_mutex rtnl_mutex target_list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &bridge_netdev_addr_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex &br->multicast_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&br->mcast_gc_work) irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&br->mcast_gc_work) &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &rq->__lock irq_context: 0 wq_pool_attach_mutex wq_pool_attach_mutex.wait_lock irq_context: 0 wq_pool_attach_mutex &rq->__lock irq_context: 0 wq_pool_attach_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &rq->__lock irq_context: 0 wq_pool_attach_mutex.wait_lock irq_context: 0 wq_pool_attach_mutex 
&cfs_rq->removed.lock irq_context: 0 wq_pool_attach_mutex &obj_hash[i].lock irq_context: 0 wq_pool_attach_mutex pool_lock#2 irq_context: 0 wq_pool_attach_mutex &obj_hash[i].lock pool_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 br_ioctl_mutex rtnl_mutex &ht->mutex irq_context: 0 br_ioctl_mutex rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 br_ioctl_mutex rtnl_mutex deferred_probe_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex deferred_probe_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex deferred_probe_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex device_links_lock irq_context: 0 br_ioctl_mutex rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex.wait_lock irq_context: 0 br_ioctl_mutex &p->pi_lock irq_context: 0 br_ioctl_mutex &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 br_ioctl_mutex dev_base_lock irq_context: 0 br_ioctl_mutex lweventlist_lock irq_context: 0 br_ioctl_mutex stock_lock irq_context: 0 br_ioctl_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex krc.lock irq_context: 0 br_ioctl_mutex &dir->lock#2 irq_context: 0 br_ioctl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 br_ioctl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 br_ioctl_mutex netdev_unregistering_wq.lock irq_context: 0 br_ioctl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rnp->exp_wq[3] irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy20 irq_context: 0 (wq_completion)phy20 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy20 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock &lru->node[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX chan_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy21 irq_context: 0 (wq_completion)phy21 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy21 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pgd_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex key irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex 
&sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock pool_lock irq_context: 0 &data->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &data->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &data->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &data->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &data->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&app->join_timer) &app->lock &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 
(wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[0] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &n->list_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &n->list_lock &c->lock irq_context: 0 key#26 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &vma->vm_lock->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock key#22 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock 
&c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->packet.sklist_lock &rq->__lock irq_context: 0 &net->packet.sklist_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &base->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 &rq->__lock irq_context: 0 (wq_completion)bond4#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex quarantine_lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex 
rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 
k-sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 tracepoints_mutex batched_entropy_u8.lock irq_context: 0 tracepoints_mutex kfence_freelist_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &wg->device_update_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &wg->device_update_lock &handshake->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &wg->device_update_lock &peer->endpoint_lock irq_context: 0 &f->f_pos_lock sb_writers#8 quarantine_lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 
&dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_wait_done_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_done_commit irq_context: 0 pcpu_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal &journal->j_wait_commit irq_context: 0 sb_writers#4 sb_internal &journal->j_wait_done_commit irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) 
fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem 
jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &sem->wait_lock irq_context: 0 
&mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) &dir->lock irq_context: softirq (&icsk->icsk_retransmit_timer) &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) stock_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pcpu_lock stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 (wq_completion)netns net_cleanup_work &rq->__lock irq_context: 0 rtnl_mutex &block->lock &rq->__lock irq_context: 0 rtnl_mutex &block->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &chain->filter_chain_lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->cb_lock irq_context: 0 rtnl_mutex &block->cb_lock &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &block->lock kfence_freelist_lock irq_context: 0 rtnl_mutex &block->lock &meta->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 fill_pool_map-wait-type-override batched_entropy_u8.lock 
irq_context: 0 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sighand->siglock stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock key#9 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock 
kfence_freelist_lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &meta->lock irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 sk_lock-AF_INET6 krc.lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET6 krc.lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq rcu_callback fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback fill_pool_map-wait-type-override &obj_hash[i].lock 
irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &rq->__lock &cfs_rq->removed.lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &data->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 remove_cache_srcu irq_context: 0 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem vmap_area_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock rcu_read_lock pool_lock#2 irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex kfence_freelist_lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &base->lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &disk->open_mutex &lo->lo_mutex rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 fs_reclaim rcu_node_0 irq_context: 0 fs_reclaim &rcu_state.expedited_wq irq_context: 0 fs_reclaim &rcu_state.expedited_wq &p->pi_lock irq_context: 0 fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ 
do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 tomoyo_ss &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 
&type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#11 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &dentry->d_lock irq_context: 0 sb_writers#11 tomoyo_ss irq_context: 0 sb_writers#11 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#11 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 kn->active#61 fs_reclaim irq_context: 0 sb_writers#11 kn->active#61 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 kn->active#61 &c->lock irq_context: 0 sb_writers#11 kn->active#61 stock_lock irq_context: 0 sb_writers#11 kn->active#61 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#11 kn->active#61 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#11 kn->active#61 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 kn->active#61 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#11 &sb->s_type->i_mutex_key#16 irq_context: 0 sb_writers#11 iattr_mutex irq_context: 0 sb_writers#11 &sb->s_type->i_mutex_key#16 tk_core.seq.seqcount irq_context: 0 sb_writers#11 &sb->s_type->i_mutex_key#16 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &sb->s_type->i_mutex_key#16 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#11 &sb->s_type->i_mutex_key#16 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#11 &sb->s_type->i_mutex_key#16 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &sb->s_type->i_mutex_key#16 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock 
irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#11 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#11 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#11 &xattrs->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 kn->active#61 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &f->f_pos_lock rcu_read_lock pgd_lock irq_context: 0 &f->f_pos_lock rcu_read_lock stock_lock irq_context: 0 &f->f_pos_lock rcu_read_lock key irq_context: 0 &f->f_pos_lock rcu_read_lock pcpu_lock irq_context: 0 &f->f_pos_lock rcu_read_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &lruvec->lru_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock pool_lock#2 
irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) pool_lock#2 irq_context: 0 &vma->vm_lock->lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (linkwatch_work).work 
rtnl_mutex lweventlist_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#4 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#3 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 &sch->q.lock irq_context: 0 fanout_mutex fs_reclaim irq_context: 0 fanout_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 fanout_mutex &c->lock irq_context: 0 fanout_mutex &n->list_lock irq_context: 0 fanout_mutex &n->list_lock &c->lock irq_context: 0 fanout_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fanout_mutex pool_lock#2 irq_context: 0 fanout_mutex &po->bind_lock irq_context: 0 fanout_mutex &po->bind_lock ptype_lock irq_context: 0 fanout_mutex &po->bind_lock &match->lock irq_context: 0 fanout_mutex &po->bind_lock &match->lock ptype_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &match->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &match->lock ptype_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock 
irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 krc.lock irq_context: 0 sb_writers#8 kn->active#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work 
vmap_purge_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &n->list_lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#3 rcu_node_0 irq_context: 0 sb_writers#3 &rcu_state.expedited_wq irq_context: 0 sb_writers#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 &cfs_rq->removed.lock irq_context: 0 sb_writers#7 &obj_hash[i].lock irq_context: 0 sb_writers#7 pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock kfence_freelist_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex crypto_alg_sem irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex fs_reclaim irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex pool_lock#2 irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex &c->lock irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex &n->list_lock irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET &dir->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &dir->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-clock-AF_INET irq_context: 0 sk_lock-AF_INET &token_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET 
k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET slock-AF_INET irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET slock-AF_INET &sk->sk_lock.wq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET slock-AF_INET irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET &sk->sk_lock.wq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET &sk->sk_lock.wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 slock-AF_INET irq_context: 0 sk_lock-AF_INET &msk->pm.lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET stock_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 
sk_lock-AF_INET6 cpu_hotplug_lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 
rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET batched_entropy_u8.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET kfence_freelist_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 
&anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->arp_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &meta->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx 
fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_node_0 irq_context: 0 tomoyo_ss rename_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 &cfs_rq->removed.lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 &pipe->mutex/1 &u->iolock irq_context: 0 &pipe->mutex/1 &u->iolock rlock-AF_UNIX irq_context: 0 &pipe->mutex/1 &u->iolock &rq->__lock irq_context: 0 &pipe->mutex/1 &u->iolock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &ei->socket.wq.wait irq_context: 0 &pipe->mutex/1 &u->iolock stock_lock irq_context: 0 &pipe->mutex/1 &u->iolock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &u->iolock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &meta->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq irq_context: 0 lock pidmap_lock batched_entropy_u8.lock irq_context: 0 lock pidmap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 pcpu_lock stock_lock irq_context: softirq (&tsc_sync_check_timer) irq_context: softirq (&tsc_sync_check_timer) &obj_hash[i].lock irq_context: softirq (&tsc_sync_check_timer) &base->lock irq_context: softirq (&tsc_sync_check_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 fs_reclaim &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 pool_lock#2 irq_context: 0 (wq_completion)events 
(work_completion)(&work->work)#2 &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 &ul->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->arp_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond8#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex rcu_read_lock_bh &data->lock irq_context: 0 rtnl_mutex rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &bat_priv->tt.changes_list_lock irq_context: 0 rtnl_mutex rcu_read_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock key#16 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &bond->mode_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 &rq->__lock irq_context: 0 (wq_completion)bond8#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->alb_work)->timer irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &dev_addr_list_lock_key/1 rcu_read_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &dev_addr_list_lock_key/1 rcu_read_lock &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/2 irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/2 rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->alb_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->alb_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->alb_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->alb_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &dev_addr_list_lock_key irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/2 rcu_read_lock &bond->stats_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &xt[i].mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &xt[i].mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &xt[i].mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 pgd_lock irq_context: 0 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 key irq_context: 0 &sb->s_type->i_mutex_key#8 pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#8 percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#8 pcpu_lock stock_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&n->timer) &n->lock &____s->seqcount#2 irq_context: softirq (&n->timer) &n->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock stock_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq irq_context: 0 
&type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#15 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sighand->siglock &base->lock irq_context: 0 &sighand->siglock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem 
jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 pgd_lock irq_context: 0 sb_writers#4 key irq_context: 0 sb_writers#4 pcpu_lock irq_context: 0 sb_writers#4 percpu_counters_lock irq_context: 0 sb_writers#4 pcpu_lock stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) 
rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 link_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 link_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock pool_lock#2 irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 
&sb->s_type->i_mutex_key#10 &net->packet.sklist_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 kfence_freelist_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex quarantine_lock irq_context: 0 &f->f_pos_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 bcm_notifier_lock irq_context: 0 (wq_completion)bond5 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &bond->mode_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &dev_addr_list_lock_key/1 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &sb->s_type->i_mutex_key#10 bcm_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &data->lock &base->lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) irq_context: 0 
(wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &bond->mode_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &dev_addr_list_lock_key/1 rcu_read_lock &batadv_netdev_addr_lock_key &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 
(wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &base->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rlock-AF_PACKET irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rename_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq irq_context: 0 
(wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 sk_lock-AF_INET6 crngs.lock base_crng.lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &base->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 &child->perf_event_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock stock_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock key irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock kfence_freelist_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &data->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &folio_wait_table[i] irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &base->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex batched_entropy_u8.lock crngs.lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override 
&rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock percpu_counters_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &base->lock &obj_hash[i].lock irq_context: 0 &sighand->siglock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) &meta->lock 
irq_context: softirq (&n->timer) kfence_freelist_lock irq_context: softirq rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh quarantine_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &c->lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock remove_cache_srcu irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 
&sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex nl_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_node_0 irq_context: 0 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &base->lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &x->wait#24 irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &rq->__lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset ip_set_ref_lock irq_context: 0 &nft_net->commit_mutex &base->lock irq_context: 0 &nft_net->commit_mutex &base->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &nft_net->commit_mutex (work_completion)(&(&priv->gc_work)->work) irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex irq_context: 0 
&nft_net->commit_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &nft_net->commit_mutex &ht->mutex irq_context: 0 &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex.wait_lock irq_context: 0 nfnl_subsys_ipset &p->pi_lock irq_context: 0 nfnl_subsys_ipset &p->pi_lock &rq->__lock irq_context: 0 nfnl_subsys_ipset &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &nft_net->commit_mutex &obj_hash[i].lock pool_lock irq_context: 0 &nft_net->commit_mutex &____s->seqcount#2 irq_context: 0 nfnl_subsys_ipset &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &data->lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle 
&sbi->s_orphan_lock rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex (console_sem).lock irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner_lock irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex &lock->wait_lock irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex &rq->__lock irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &n->list_lock &c->lock irq_context: 0 kn->active#62 fs_reclaim irq_context: 0 kn->active#62 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#62 &c->lock irq_context: 0 kn->active#62 stock_lock irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock &wq#2 irq_context: 0 kn->active#63 fs_reclaim irq_context: 0 kn->active#63 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#63 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#63 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#63 stock_lock irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 fs_reclaim irq_context: 0 sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 stock_lock irq_context: 0 sb_writers#11 &c->lock irq_context: 0 sb_writers#11 pool_lock#2 irq_context: 0 sb_writers#11 &____s->seqcount irq_context: 0 sb_writers#11 &p->lock irq_context: 0 sb_writers#11 &p->lock &rq->__lock irq_context: 0 sb_writers#11 &p->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &p->lock fs_reclaim irq_context: 0 sb_writers#11 &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &p->lock stock_lock 
irq_context: 0 sb_writers#11 &p->lock &c->lock irq_context: 0 sb_writers#11 &p->lock pool_lock#2 irq_context: 0 sb_writers#11 &p->lock &of->mutex irq_context: 0 sb_writers#11 &p->lock &of->mutex &rq->__lock irq_context: 0 sb_writers#11 &p->lock &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex css_set_lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex fs_reclaim irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex pool_lock#2 irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex &rq->__lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex &base->lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#63 &cgrp->pidlist_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#11 &obj_hash[i].lock irq_context: 0 sb_writers#11 tk_core.seq.seqcount irq_context: 0 sb_writers#11 sb_writers#11 mount_lock irq_context: 0 sb_writers#11 sb_writers#11 tk_core.seq.seqcount irq_context: 0 sb_writers#11 sb_writers#11 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 sb_writers#11 &rq->__lock irq_context: 0 sb_writers#11 sb_writers#11 &wb->list_lock irq_context: 0 sb_writers#11 sb_writers#11 &wb->list_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &of->mutex irq_context: 0 sb_writers#11 &of->mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock fs_reclaim irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock cgroup_file_kn_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->alloc_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->alloc_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex cpuset_attach_wq.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 css_set_lock cgroup_file_kn_lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &base->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq rcu_callback css_set_lock irq_context: softirq rcu_callback css_set_lock &obj_hash[i].lock irq_context: softirq rcu_callback css_set_lock pool_lock#2 irq_context: softirq rcu_callback css_set_lock krc.lock irq_context: 0 nfnl_subsys_nftables &rq->__lock irq_context: 0 nfnl_subsys_nftables &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_node_0 irq_context: softirq &(&l->destroy_dwork)->timer irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_pidlist_destroy irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &obj_hash[i].lock irq_context: 0 kn->active#63 &c->lock irq_context: 0 kn->active#63 &n->list_lock irq_context: 0 kn->active#63 &n->list_lock &c->lock irq_context: 0 kn->active#63 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock cpuset_mutex irq_context: 0 
sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock cpuset_mutex fs_reclaim irq_context: 0 sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock cpuset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock cpuset_mutex pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock cpuset_mutex callback_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock cpuset_mutex css_set_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#62 cpu_hotplug_lock cpuset_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 rcu_read_lock key#22 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &list->lock#16 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx 
&local->chanctx_mtx rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &list->lock#16 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy9 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock &base->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx 
&local->chanctx_mtx rcu_read_lock &sta->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock 
&pool->lock/1 irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override &c->lock irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx 
irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &c->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx 
&local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy7 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &im->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET 
cpu_hotplug_lock jump_label_mutex text_mutex.wait_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex &p->pi_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &p->pi_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &iint->mutex &folio_wait_table[i] irq_context: 0 sb_writers#4 &iint->mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 mount_lock irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
&iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &iint->mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET krc.lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (debug_obj_work).work &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &n->list_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex &p->alloc_lock irq_context: 0 sb_writers#4 &iint->mutex &list->lock irq_context: 0 sb_writers#4 &iint->mutex kauditd_wait.lock irq_context: 0 sb_writers#4 &iint->mutex kauditd_wait.lock &p->pi_lock irq_context: 0 sb_writers#4 &iint->mutex kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET key#24 irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex &pool->lock irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &meta->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock console_owner_lock irq_context: 0 &f->f_pos_lock console_owner irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex text_mutex.wait_lock irq_context: 0 (crypto_chain).rwsem &____s->seqcount#2 irq_context: 0 (crypto_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 crypto_alg_sem &rq->__lock irq_context: 0 crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) 
&rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &list->lock#15 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock 
&pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 lock map_idr_lock &n->list_lock irq_context: 0 lock map_idr_lock &n->list_lock &c->lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex crypto_alg_sem irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex fs_reclaim irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex pool_lock#2 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (kmod_concurrent_max).lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &rq->__lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &c->lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &n->list_lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &obj_hash[i].lock irq_context: 0 
sk_lock-AF_ALG &drbg->drbg_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &x->wait#17 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex running_helpers_waitq.lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex remove_cache_srcu irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex crypto_alg_sem irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem fs_reclaim irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &c->lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem pool_lock#2 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem kthread_create_lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &p->pi_lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &x->wait irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &rq->__lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &x->wait#21 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &base->lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &base->lock &obj_hash[i].lock irq_context: 0 crypto_alg_sem &rq->__lock irq_context: 0 crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex (&timer.timer) irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &____s->seqcount irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex crngs.lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &rng->jent_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rng->jent_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rng->jent_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rng->jent_lock pool_lock#2 irq_context: 
softirq (&peer->timer_zero_key_material) irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &drbg->drbg_mutex &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx 
krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#6 &rq->__lock irq_context: 0 sb_writers#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &drbg->drbg_mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &data->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &base->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&strp->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex 
&ht->mutex &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &n->list_lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 kfence_freelist_lock irq_context: 0 cb_lock rtnl_mutex &dev->power.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer)#2 &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer)#2 &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &pipe->wr_wait &ep->lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &ep->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 br_ioctl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &ul->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults batched_entropy_u8.lock crngs.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) &____s->seqcount#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&hsr->prune_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&hsr->prune_timer) fill_pool_map-wait-type-override &c->lock irq_context: softirq (&hsr->prune_timer) fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&hsr->prune_timer) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&hsr->prune_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &base->lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex irq_context: 0 &tfile->napi_mutex &____s->seqcount irq_context: 0 &tfile->napi_mutex pool_lock#2 irq_context: 0 &tfile->napi_mutex &c->lock irq_context: 0 &tfile->napi_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 key#24 irq_context: 0 &tfile->napi_mutex &n->list_lock &c->lock irq_context: 0 &tfile->napi_mutex &rq->__lock irq_context: 0 &tfile->napi_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&tfile->napi_mutex &mm->mmap_lock irq_context: 0 &tfile->napi_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock &data->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &tbl->lock &n->list_lock irq_context: 0 rtnl_mutex &tbl->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER &n->list_lock irq_context: 0 rtnl_mutex _xmit_ETHER &n->list_lock &c->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &bridge_netdev_addr_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 br_ioctl_mutex &obj_hash[i].lock pool_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 &xa->xa_lock#7 &n->list_lock irq_context: 0 &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &rq->__lock irq_context: 0 cb_lock 
genl_mutex rtnl_mutex ipvs->sync_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &sb->s_type->i_lock_key#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &dir->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-slock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &in_dev->mc_tomb_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &im->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &base->lock irq_context: 0 cb_lock genl_mutex 
rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex kthread_create_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &x->wait irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &ipvs->sync_buff_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 &data->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &ht->lock irq_context: 0 sb_writers#4 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_list_lock irq_context: 0 &ei->i_es_lock key#6 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &ei->i_data_sem &mapping->private_lock irq_context: softirq rcu_callback &rsp->gp_wait &obj_hash[i].lock irq_context: softirq rcu_callback &rsp->gp_wait pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &bgl->locks[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#7 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#7 stock_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#7 pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait 
&p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &sem->waiters irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&tbl->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock krc.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_node_0 irq_context: 0 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond5 
(work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 pcpu_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &ei->i_data_sem &n->list_lock irq_context: 0 &ei->i_data_sem &n->list_lock &c->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rnp->exp_wq[1] irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &group->inotify_data.idr_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock &pa->pa_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->packet.sklist_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rnp->exp_wq[2] irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 tomoyo_ss console_owner_lock irq_context: 0 tomoyo_ss console_owner irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem (console_sem).lock irq_context: 0 &sbi->s_writepages_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &sbi->s_writepages_rwsem console_lock console_srcu console_owner irq_context: 0 &sbi->s_writepages_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem (console_sem).lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem console_owner irq_context: 0 &sbi->s_writepages_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem console_lock console_srcu console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem console_lock console_srcu console_owner irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pgdat->reclaim_wait[i] irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &pgdat->reclaim_wait[i] irq_context: 0 sb_writers#4 (&timer.timer) irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 
sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem console_owner_lock irq_context: 0 &sbi->s_writepages_rwsem console_owner irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &pgdat->reclaim_wait[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &pgdat->reclaim_wait[i] &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &pgdat->reclaim_wait[i] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &pgdat->reclaim_wait[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &sem->waiters irq_context: 0 &sbi->s_writepages_rwsem &sem->waiters rcu_read_lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem &sem->waiters rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &sem->waiters rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &mm->mmap_lock pgd_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock stock_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock key irq_context: 0 &xt[i].mutex &mm->mmap_lock pcpu_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock percpu_counters_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &cfs_rq->removed.lock irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rnp->exp_wq[3] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex wq_pool_mutex 
&wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lock->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET key#24 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lock->wait_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim &obj_hash[i].lock irq_context: 0 &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &obj_hash[i].lock irq_context: 0 &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex pgd_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex 
rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex pcpu_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &bgl->locks[i].lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#5 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#5 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#5 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#7 rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &disk->open_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex 
&rnp->exp_wq[1] irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle (console_sem).lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle console_lock console_srcu console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle console_lock console_srcu console_owner irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem (console_sem).lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem console_lock console_srcu console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem console_lock console_srcu console_owner irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &sem->waiters irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters &p->pi_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock (console_sem).lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock console_lock console_srcu console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock console_lock console_srcu console_owner irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET batched_entropy_u32.lock irq_context: 0 syslog_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &pool->lock#3 irq_context: softirq &pool->lock#3 irq_context: softirq &pool->wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lock->wait_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_node_0 irq_context: 0 kn->active#5 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &lg->lg_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex bit_wait_table + i irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#3 rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock &cfs_rq->removed.lock irq_context: 0 
(wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &bgl->locks[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &p->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pgd_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pcpu_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pcpu_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond13 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock 
&____s->seqcount irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 rcu_read_lock &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#8 &rcu_state.expedited_wq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &base->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock rename_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &meta->lock irq_context: 0 &xt[i].mutex rcu_read_lock pgd_lock irq_context: 0 &xt[i].mutex 
rcu_read_lock stock_lock irq_context: 0 &xt[i].mutex rcu_read_lock key irq_context: 0 &xt[i].mutex rcu_read_lock pcpu_lock irq_context: 0 &xt[i].mutex rcu_read_lock percpu_counters_lock irq_context: 0 &xt[i].mutex rcu_read_lock pcpu_lock stock_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem rcu_node_0 irq_context: 0 &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock reuseport_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock pool_lock#2 irq_context: softirq rcu_callback reuseport_ida.xa_lock irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock crngs.lock base_crng.lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 loop_validate_mutex loop_validate_mutex.wait_lock irq_context: 0 loop_validate_mutex.wait_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pool->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &sem->waiters rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 
&sb->s_type->i_mutex_key#13 &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pgd_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock stock_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pcpu_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock percpu_counters_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pcpu_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->clear_peer_work) irq_context: 0 
(wq_completion)wg-kex-wg2#9 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock pgd_lock irq_context: 0 &f->f_pos_lock &p->lock stock_lock irq_context: 0 &f->f_pos_lock &p->lock key irq_context: 0 &f->f_pos_lock &p->lock pcpu_lock irq_context: 0 &f->f_pos_lock &p->lock percpu_counters_lock irq_context: 0 &f->f_pos_lock &p->lock pcpu_lock stock_lock irq_context: 0 rcu_read_lock &n->lock &____s->seqcount irq_context: 0 rcu_read_lock &n->lock pool_lock#2 irq_context: 0 rcu_read_lock &n->lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#7 pool_lock#2 irq_context: 0 clock-AF_NETLINK irq_context: 0 genl_sk_destructing_waitq.lock irq_context: 0 wlock-AF_NETLINK irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xdp.lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xdp.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &data->lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 sk_lock-AF_INET6/1 &list->lock#20 irq_context: 0 &net->sctp.addr_wq_lock irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 pool_lock#2 irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 clock-AF_INET6 irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: softirq (&n->timer) k-slock-AF_INET &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock key#22 irq_context: 0 clock-AF_INET irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 clock-AF_INET6 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock kfence_freelist_lock irq_context: 0 lock btf_idr_lock &c->lock irq_context: 0 lock btf_idr_lock &n->list_lock irq_context: 0 lock btf_idr_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#8 bit_wait_table + i irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink fs_reclaim irq_context: 0 nfnl_subsys_ctnetlink fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ctnetlink pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &obj_hash[i].lock pool_lock irq_context: 0 nfnl_subsys_ctnetlink &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 ppp_mutex irq_context: 0 ppp_mutex &rq->__lock irq_context: 0 ppp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#7 &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 bt_proto_lock bnep_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 bnep_sk_list.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock stock_lock irq_context: 0 &u->iolock &____s->seqcount irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key ptlock_ptr(page) irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem 
jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock nl_table_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &mapping->private_lock irq_context: 0 &po->bind_lock ptype_lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lock->wait_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&type->i_mutex_dir_key#3 jbd2_handle &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &obj_hash[i].lock irq_context: 0 __ip_vs_mutex &mm->mmap_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->alb_work)->work) &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &h->lhash2[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &h->lhash2[i].lock k-clock-AF_INET6 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &hashinfo->ehash_locks[i] irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &n->list_lock &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &c->lock irq_context: 0 
sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rlock-AF_NETLINK irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_QIPCRTR &mm->mmap_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex inet_diag_table_mutex irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex (kmod_concurrent_max).lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex fs_reclaim irq_context: 0 sock_diag_mutex sock_diag_table_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex sock_diag_table_mutex pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex &x->wait#17 irq_context: 0 sock_diag_mutex sock_diag_table_mutex running_helpers_waitq.lock irq_context: 0 sock_diag_mutex &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &xs->mutex irq_context: 0 &xs->mutex fs_reclaim irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xs->mutex pool_lock#2 irq_context: 0 &xs->mutex free_vmap_area_lock irq_context: 0 &xs->mutex vmap_area_lock irq_context: 0 &xs->mutex &____s->seqcount irq_context: 0 rtnl_mutex &xs->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 vmap_area_lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &n->list_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5 &rq->__lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock genl_mutex &base->lock irq_context: 0 cb_lock genl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex (&timer.timer) irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 ebt_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy22 irq_context: 0 (wq_completion)phy22 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy22 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 sock_diag_mutex (kmod_concurrent_max).lock irq_context: 0 sock_diag_mutex &obj_hash[i].lock irq_context: 0 sock_diag_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sock_diag_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sock_diag_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sock_diag_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sock_diag_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex &x->wait#17 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start pool_lock#2 irq_context: 0 sock_diag_mutex running_helpers_waitq.lock irq_context: 0 sock_diag_mutex rcu_read_lock &n->list_lock irq_context: 0 sock_diag_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 &ep->mtx rcu_node_0 irq_context: 0 tomoyo_ss batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 key#10 irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &n->list_lock irq_context: 0 &vma->vm_lock->lock &n->list_lock &c->lock irq_context: 0 nl_table_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex lock kernfs_idr_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock kfence_freelist_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 key#27 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 key#27 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &match->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &match->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &dir->lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock pcpu_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 acaddr_hash_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 
&tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock _xmit_ETHER irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &ndev->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex acaddr_hash_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rt6_exception_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex 
&tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock stock_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock &mm->page_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock ptlock_ptr(page) irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &meta->lock irq_context: 0 elock-AF_INET6 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock kfence_freelist_lock irq_context: 0 sk_lock-AF_CAN clock-AF_CAN irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 
sk_lock-AF_CAN proc_inum_ida.xa_lock irq_context: 0 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &list->lock#5 irq_context: 0 sk_lock-AF_CAN tk_core.seq.seqcount irq_context: 0 sk_lock-AF_CAN &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &ent->pde_unload_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN proc_inum_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] batched_entropy_u8.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] batched_entropy_u8.lock crngs.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] kfence_freelist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET pcpu_lock stock_lock irq_context: 0 &ei->i_es_lock irq_context: 0 slock-AF_NETLINK &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock 
batched_entropy_u8.lock crngs.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &cfs_rq->removed.lock irq_context: 0 data_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 data_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN slock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN clock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN rlock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_ISDN irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &sem->wait_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sk_lock-AF_INET sctp_assocs_id_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock &pa->pa_lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &base->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 cgroup_threadgroup_rwsem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: softirq (&q->timer) irq_context: softirq (&q->timer) rcu_read_lock &q->lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock &obj_hash[i].lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq (&q->timer) rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&q->timer) rcu_read_lock &q->lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&q->timer) &obj_hash[i].lock irq_context: softirq (&q->timer) &data->lock irq_context: softirq (&q->timer) pool_lock#2 irq_context: softirq (&q->timer) &obj_hash[i].lock pool_lock irq_context: 0 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)bond14 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_node_0 irq_context: 0 rtnl_mutex lweventlist_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex lweventlist_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond15 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 key#22 irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#14 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#14 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#14 &meta->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#14 kfence_freelist_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#14 quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pcpu_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 percpu_counters_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &rnp->exp_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex irq_context: 0 &iint->mutex ima_extend_list_mutex batched_entropy_u8.lock irq_context: 0 &iint->mutex ima_extend_list_mutex kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &base->lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 
&anon_vma->rwsem rcu_read_lock key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 &xt[i].mutex remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 lock#5 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &obj_hash[i].lock irq_context: 0 misc_mtx fs_reclaim &rq->__lock irq_context: 0 misc_mtx fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock lock#5 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &s->s_inode_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &s->s_inode_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem inode_hash_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &lruvec->lru_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &f->f_lock irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock irq_context: 0 rcu_read_lock &new->fa_lock irq_context: 0 rcu_read_lock &new->fa_lock &f->f_owner.lock irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock &new->fa_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &new->fa_lock &f->f_owner.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock 
&obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &token_hash[i].lock irq_context: 0 &nft_net->commit_mutex nf_ct_proto_mutex irq_context: 0 &nft_net->commit_mutex nf_ct_proto_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex (work_completion)(&(&priv->gc_work)->work)#2 irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &nft_net->commit_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &nft_net->commit_mutex remove_cache_srcu irq_context: 0 &nft_net->commit_mutex remove_cache_srcu quarantine_lock irq_context: 0 &nft_net->commit_mutex remove_cache_srcu &c->lock irq_context: 0 &nft_net->commit_mutex remove_cache_srcu &n->list_lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &nft_net->commit_mutex fs_reclaim &rq->__lock irq_context: 0 &nft_net->commit_mutex &ht->mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex quarantine_lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 
&meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 kfence_freelist_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work quarantine_lock irq_context: 0 rtnl_mutex noop_qdisc.q.lock crngs.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex nbd_index_mutex irq_context: 0 cb_lock genl_mutex &nbd->config_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &nbd->config_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &nbd->config_lock &bdev->bd_size_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &q->queue_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex &nbd->config_lock set->srcu irq_context: 0 cb_lock genl_mutex &nbd->config_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &nbd->config_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &nbd->config_lock &x->wait#3 irq_context: 0 cb_lock genl_mutex &nbd->config_lock set->srcu irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)nbd-del irq_context: 0 (wq_completion)nbd-del 
(work_completion)(&nbd->remove_work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &disk->open_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &disk->open_mutex inode_hash_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &disk->open_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &disk->open_mutex sb_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &bdev->bd_size_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->mq_freeze_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->mq_freeze_lock percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->mq_freeze_lock percpu_ref_switch_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) set->srcu irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->mq_freeze_wq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &bdev->bd_holder_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &root->kernfs_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (&bdi->laptop_mode_wb_timer) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &base->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) bdi_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &rnp->exp_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &wb->work_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) cgwb_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &(&wb->dwork)->timer irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&(&wb->dwork)->work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &(&wb->bw_dwork)->timer irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&(&wb->bw_dwork)->work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &bdi->cgwb_release_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &bdi->cgwb_release_mutex cgwb_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) pin_fs_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &dentry->d_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) 
&sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &fsnotify_mark_srcu irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &s->s_inode_list_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &xa->xa_lock#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &root->kernfs_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) kernfs_idr_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &k->list_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) sysfs_symlink_target_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) subsys mutex#38 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) subsys mutex#38 &k->k_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) subsys mutex#38 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &x->wait#9 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) dpm_list_mtx irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &dev->power.lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) deferred_probe_mutex irq_context: 0 (wq_completion)nbd-del 
(work_completion)(&nbd->remove_work) device_links_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &p->pi_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) gdp_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) 
&q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &____s->seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock kernfs_idr_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock kernfs_idr_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex pin_fs_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &dentry->d_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)nbd-del 
(work_completion)(&nbd->remove_work) &q->debugfs_mutex &fsnotify_mark_srcu irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &xa->xa_lock#7 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex rcu_read_lock mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex mount_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->debugfs_mutex mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) dev_hotplug_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) req_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &x->wait#11 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) subsys mutex#37 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) subsys mutex#37 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) percpu_ref_switch_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->queue_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (&q->timeout) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&q->timeout_work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (wq_completion)kintegrityd irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &wq->mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &wq->mutex &pool->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &wq->mutex &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 sb_writers &s->s_inode_list_lock irq_context: 0 sb_writers &sbinfo->stat_lock irq_context: 0 sb_writers &xa->xa_lock#7 irq_context: 0 sb_writers &fsnotify_mark_srcu irq_context: 0 (wq_completion)nbd-del 
(work_completion)(&nbd->remove_work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) set->srcu irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &x->wait#3 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &q->queue_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &tags->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &eq->sysfs_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &eq->sysfs_lock &q->debugfs_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &eq->sysfs_lock &dd->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &eq->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &eq->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock &eq->sysfs_lock pcpu_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex 
&stats->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex (&cb->timer) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex &base->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->rq_qos_mutex pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &tags->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) cpu_hotplug_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &xa->xa_lock#8 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->unused_hctx_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &set->tag_list_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (&sq->pending_timer) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&td->dispatch_work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock (&sq->pending_timer) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &base->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock percpu_counters_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pcpu_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) pcpu_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) bio_slab_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) bio_slab_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del 
(work_completion)(&nbd->remove_work) bio_slab_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &xa->xa_lock#9 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &zone->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &zone->lock &____s->seqcount irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &(&ssp->srcu_sup->work)->timer irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&(&ssp->srcu_sup->work)->work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (&sdp->delay_work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) (work_completion)(&sdp->work) irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) nbd_index_mutex irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) wq_mayday_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &x->wait irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) wq_pool_mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &pool->lock/1 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)nbd-del (work_completion)(&nbd->remove_work) rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &r->producer_lock#5 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 
(wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET (console_sem).lock irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tp->lock irq_context: 0 rtnl_mutex &tp->lock pool_lock#2 irq_context: 0 rtnl_mutex &xa->xa_lock#16 irq_context: 0 rtnl_mutex &xa->xa_lock#16 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &head->masks_lock irq_context: 0 rtnl_mutex &block->cb_lock &tp->lock irq_context: 0 (wq_completion)tc_filter_workqueue irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) &obj_hash[i].lock irq_context: 0 tasklist_lock rcu_read_lock &p->pi_lock irq_context: 0 tasklist_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &tp->lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &tp->lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &tp->lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tp->lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &tp->lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &tp->lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &tp->lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tp->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) &ht->mutex irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) &xa->xa_lock#16 irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) pool_lock#2 irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) krc.lock irq_context: 0 (wq_completion)tc_filter_workqueue (work_completion)(&(rwork)->work) &dir->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tp->lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rnp->exp_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx 
&wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx 
hrtimer_bases.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#15 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &data->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx 
rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&local->dynamic_ps_timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&sdata->recalc_smps) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->color_change_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock 
&p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &list->lock#15 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->filter_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#13 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock rtnl_mutex &lock->wait_lock irq_context: 0 cb_lock rtnl_mutex &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &data->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &sem->waiters irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 delayed_uprobe_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock quarantine_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &base->lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#7 stock_lock irq_context: 0 &f->f_pos_lock 
sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_TUNNEL6 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_TUNNEL6 &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_TUNNEL6 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem lock#4 &lruvec->lru_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &lruvec->lru_lock irq_context: 0 mapping.invalidate_lock &xa->xa_lock#7 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle 
rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 sb_writers#10 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#10 tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#60 
&____s->seqcount irq_context: 0 kn->active#60 pool_lock#2 irq_context: 0 kn->active#60 &rq->__lock irq_context: 0 kn->active#60 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#60 &cfs_rq->removed.lock irq_context: 0 kn->active#60 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex &rq->__lock irq_context: 0 pack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex &cfs_rq->removed.lock irq_context: 0 pack_mutex &obj_hash[i].lock irq_context: 0 pack_mutex &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#10 kn->active#58 &c->lock irq_context: 0 sb_writers#10 kn->active#58 &n->list_lock irq_context: 0 sb_writers#10 kn->active#58 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock bit_wait_table + i irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &c->lock irq_context: 0 &ep->mtx &rcu_state.expedited_wq irq_context: 0 &ep->mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) pool_lock#2 irq_context: 0 rtnl_mutex &ht->mutex &____s->seqcount irq_context: 0 rtnl_mutex &ht->mutex rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#6 nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex 
team->team_lock_key#6 nl_table_wait.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &fq->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)events 
(work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock fs_reclaim irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock rlock-AF_BLUETOOTH irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#64 fs_reclaim irq_context: 0 kn->active#64 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#64 stock_lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#10 fs_reclaim irq_context: 0 sb_writers#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 stock_lock irq_context: 0 sb_writers#10 &____s->seqcount irq_context: 0 sb_writers#10 &p->lock irq_context: 0 sb_writers#10 &p->lock fs_reclaim irq_context: 0 sb_writers#10 &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &p->lock stock_lock irq_context: 0 sb_writers#10 &p->lock &c->lock irq_context: 0 sb_writers#10 &p->lock &of->mutex irq_context: 0 sb_writers#10 &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex irq_context: 0 sb_writers#10 &of->mutex kn->active#64 shares_mutex irq_context: 0 sb_writers#10 &rq->__lock irq_context: 0 sb_writers#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 tk_core.seq.seqcount irq_context: 0 sb_writers#10 sb_writers#10 mount_lock irq_context: 0 sb_writers#10 sb_writers#10 tk_core.seq.seqcount irq_context: 0 sb_writers#10 sb_writers#10 &rq->__lock irq_context: 0 sb_writers#10 sb_writers#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 sb_writers#10 &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 sb_writers#10 &wb->list_lock irq_context: 0 sb_writers#10 sb_writers#10 &wb->list_lock &sb->s_type->i_lock_key#30 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 
dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#64 &c->lock irq_context: 0 kn->active#64 &n->list_lock irq_context: 0 kn->active#64 &n->list_lock &c->lock irq_context: 0 kn->active#64 &rq->__lock irq_context: 0 kn->active#64 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#64 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#64 rcu_read_lock &rq->__lock irq_context: 0 kn->active#64 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#64 fs_reclaim &rq->__lock irq_context: 0 kn->active#64 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &iint->mutex pool_lock#2 irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex &rnp->exp_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rcu_state.expedited_wq irq_context: 0 cb_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx 
rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock batched_entropy_u8.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock kfence_freelist_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rnp->exp_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rnp->exp_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rnp->exp_wq[3] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &rnp->exp_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu 
&rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &mm->mmap_lock rcu_node_0 irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &bond->mode_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) 
&data->lock irq_context: 0 cb_lock genl_mutex &pernet->lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &r->producer_lock#3 irq_context: 0 &xs->mutex &c->lock irq_context: 0 &xs->mutex init_mm.page_table_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &r->producer_lock#3 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pgd_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle percpu_counters_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &macvlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work rcu_node_0 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_PPPOX &dir->lock irq_context: 0 sk_lock-AF_PPPOX &pn->all_channels_lock irq_context: 0 sk_lock-AF_PPPOX &mm->mmap_lock irq_context: 0 sk_lock-AF_PPPOX &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_PPPOX 
&mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_PPPOX rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_PPPOX &c->lock irq_context: 0 sk_lock-AF_PPPOX &n->list_lock irq_context: 0 sk_lock-AF_PPPOX &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PPPOX &rq->__lock irq_context: 0 sk_lock-AF_PPPOX batched_entropy_u8.lock irq_context: 0 sk_lock-AF_PPPOX kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &data->lock irq_context: 0 sk_lock-AF_PPPOX rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_PPPOX rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_PPPOX &____s->seqcount#2 irq_context: 0 sk_lock-AF_PPPOX &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_PPPOX &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_PPPOX &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem &pch->downl irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->upl irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->all_channels_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pf->rwait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &list->lock#40 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &ei->i_prealloc_lock irq_context: 0 inode_hash_lock 
&sb->s_type->i_lock_key#22 irq_context: 0 integrity_iint_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xs->mutex umem_ida.xa_lock irq_context: 0 &xs->mutex &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xs->mutex &mm->mmap_lock &____s->seqcount irq_context: 0 &xs->mutex &mm->mmap_lock pool_lock#2 irq_context: 0 &xs->mutex &mm->mmap_lock stock_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &xs->mutex &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 &xs->mutex &sem->wait_lock irq_context: 0 &xs->mutex &p->pi_lock irq_context: 0 &xs->mutex &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &pcp->lock &zone->lock irq_context: 0 &xs->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex fs_reclaim &rq->__lock irq_context: 0 &xs->mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &cfs_rq->removed.lock irq_context: 0 &xs->mutex &obj_hash[i].lock irq_context: 0 &xs->mutex &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) irq_context: 0 (wq_completion)events (work_completion)(&umem->work) umem_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &lruvec->lru_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&umem->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) rcu_node_0 irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock 
&c->lock irq_context: 0 &xs->mutex &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 
(wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 napi_hash_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond25 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 nl_table_lock nl_table_wait.lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bond->mcast_work)->timer irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &lock->wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &n->list_lock irq_context: 0 &xs->mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &meta->lock irq_context: 0 rtnl_mutex remove_cache_srcu kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &zone->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 
rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 
0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &dev->power.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &xs->mutex 
&____s->seqcount#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &xs->mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &net->xdp.lock &xs->mutex irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mii_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &xs->mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond24#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 
(wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond25#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond26#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond25#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond28 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 
(wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &____s->seqcount irq_context: 0 (wq_completion)bond29 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 irq_context: 0 (wq_completion)bond28#2 &rq->__lock irq_context: 0 (wq_completion)bond28#2 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond28#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30 irq_context: 0 (wq_completion)bond30 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond30#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock 
irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond31 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond31 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond31 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 (wq_completion)bond32 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &br->hash_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &br->hash_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[3] irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &p->pi_lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_node_0 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uts_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex (work_completion)(&ht->run_work) &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 rtnl_mutex remove_cache_srcu rcu_node_0 irq_context: 0 rtnl_mutex remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 rtnl_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond9 &rq->__lock irq_context: 0 (wq_completion)bond9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_node_0 irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: softirq rcu_read_lock rlock-AF_PACKET tk_core.seq.seqcount irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)bond48 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 batched_entropy_u8.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 kfence_freelist_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond49 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond51 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond52 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond53 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond53 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond56 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond50#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57 irq_context: 0 (wq_completion)bond57 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond51#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond52#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond53#2 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond54#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond55#2 
irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond56#2 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_node_0 irq_context: 0 rtnl_mutex sysctl_lock rcu_read_lock &p->pi_lock irq_context: 0 rtnl_mutex sysctl_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex sysctl_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sysctl_lock rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &knet->mutex irq_context: 0 &knet->mutex 
&rq->__lock irq_context: 0 &mux->lock irq_context: 0 &mux->rx_lock irq_context: 0 (wq_completion)bond57#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM clock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&kcm->tx_work) irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&kcm->tx_work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock rlock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 &knet->mutex irq_context: 0 (wq_completion)writeback rcu_node_0 irq_context: 0 (wq_completion)writeback &rcu_state.expedited_wq irq_context: 0 (wq_completion)writeback &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)writeback &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback &rq->__lock irq_context: 0 (wq_completion)writeback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 
(wq_completion)bond59 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&br->mcast_gc_work) &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu rcu_read_lock 
&____s->seqcount irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#7 &rcu_state.expedited_wq irq_context: 0 sb_writers#7 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#7 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#7 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &mm->page_table_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock ptlock_ptr(page) irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 (wq_completion)bond40 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)bond41 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 
&vma->vm_lock->lock pcpu_lock stock_lock irq_context: 0 (wq_completion)bond16 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock 
&journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock 
&cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem 
&ei->i_data_sem/1 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &c->lock irq_context: 0 (wq_completion)bond19#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount irq_context: 0 cb_lock rtnl_mutex fs_reclaim irq_context: 0 cb_lock rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &c->lock irq_context: 0 cb_lock rtnl_mutex &n->list_lock irq_context: 0 cb_lock rtnl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex nl_table_lock irq_context: 0 cb_lock rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond19#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_node_0 irq_context: 0 (wq_completion)bond87 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_VSOCK &____s->seqcount#2 irq_context: 0 sk_lock-AF_VSOCK &____s->seqcount irq_context: 0 sk_lock-AF_VSOCK &list->lock#41 irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) &list->lock#41 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) vsock_table_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK fs_reclaim irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &dir->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->tx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 vsock_table_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->rx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &list->lock#41 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &data->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK slock-AF_VSOCK 
&sk->sk_lock.wq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &rq->__lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK &ei->socket.wq.wait irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &vvs->tx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 sk_lock-AF_VSOCK &vvs->tx_lock irq_context: 0 sk_lock-AF_VSOCK &zone->lock irq_context: 0 sk_lock-AF_VSOCK &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond90 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_VSOCK &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 sk_lock-AF_VSOCK &data->lock irq_context: 0 sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &list->lock#41 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &list->lock#41 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 clock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rlock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &base->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &zone->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_VSOCK &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &list->lock#41 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) &data->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)bond91 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&n->list_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 
0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&rcu_state.expedited_wq irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 &meta->lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) &n->lock batched_entropy_u8.lock irq_context: softirq (&n->timer) &n->lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nfnl_subsys_ipset &rq->__lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rcu_read_lock console_owner_lock irq_context: 0 rcu_read_lock console_owner irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond51#3 irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond51#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &____s->seqcount irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq (&peer->timer_persistent_keepalive) &pcp->lock &zone->lock irq_context: softirq (&peer->timer_persistent_keepalive) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond95 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55#3 irq_context: 0 
(wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond56#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock kfence_freelist_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#3 irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 irq_context: 0 (wq_completion)bond85 &rq->__lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 bpf_devs_lock irq_context: 0 bpf_devs_lock fs_reclaim irq_context: 0 
bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_devs_lock &rq->__lock irq_context: 0 bpf_devs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_devs_lock pool_lock#2 irq_context: 0 bpf_devs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond85 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex 
rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond101 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &data->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex xfrm_state_gc_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work irq_context: 0 (wq_completion)events xfrm_state_gc_work xfrm_state_gc_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work (&x->rtimer) irq_context: 0 (wq_completion)events xfrm_state_gc_work &base->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work pool_lock#2 irq_context: 0 rlock-AF_KEY irq_context: 0 (wq_completion)bond102 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &rnp->exp_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86 
irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond60 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond64 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 
(wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond63 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond55#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond62 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond65 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 &rq->__lock irq_context: 0 (wq_completion)bond9#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond66 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 &data->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 &data->lock &base->lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &____s->seqcount#2 irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &____s->seqcount irq_context: 0 (wq_completion)bond67 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86 &rq->__lock irq_context: 0 (wq_completion)bond86 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock fill_pool_map-wait-type-override &c->lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 lock prog_idr_lock &pcp->lock &zone->lock irq_context: 0 lock prog_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond111 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond112 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond71 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond114 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) 
irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond90#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond74 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 &rq->__lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond73 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock/1 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond68 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond72 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 
(wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock 
irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&data->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
tk_core.seq.seqcount irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond92#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond74 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond76 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond110 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond79 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond66 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond81 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq rcu_read_lock 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem &rq->__lock &obj_hash[i].lock irq_context: 0 &ei->i_data_sem &rq->__lock &base->lock irq_context: 0 &ei->i_data_sem &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond83 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond126 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)bond128 irq_context: 0 (wq_completion)bond128 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sk_lock-AF_RDS rds_trans_sem irq_context: 0 sk_lock-AF_RDS rds_trans_sem fs_reclaim irq_context: 0 sk_lock-AF_RDS rds_trans_sem fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_RDS rds_trans_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RDS rds_trans_sem pool_lock#2 irq_context: 0 sk_lock-AF_RDS rds_trans_sem crngs.lock 
irq_context: 0 sk_lock-AF_RDS rds_trans_sem &id_priv->lock irq_context: 0 sk_lock-AF_RDS rds_trans_sem lock#7 irq_context: 0 sk_lock-AF_RDS rds_trans_sem &id_priv->handler_mutex irq_context: 0 sk_lock-AF_RDS rds_trans_sem &id_priv->handler_mutex &id_priv->lock irq_context: 0 sk_lock-AF_RDS rds_trans_sem id_table_lock irq_context: 0 sk_lock-AF_RDS rds_trans_sem &x->wait#27 irq_context: 0 sk_lock-AF_RDS rds_trans_sem &obj_hash[i].lock irq_context: 0 sk_lock-AF_RDS (console_sem).lock irq_context: 0 sk_lock-AF_RDS console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_RDS console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_RDS console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_RDS console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond93#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 
(wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond87#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond88#3 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond89#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 
0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond133 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 irq_context: 0 (wq_completion)bond134 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 
(wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond91#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond83 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 oom_adj_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond92#3 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 
(wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex ip_vs_sched_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex stock_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex cpu_hotplug_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &c->lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &____s->seqcount irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pcpu_alloc_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &n->list_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &n->list_lock &c->lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex fs_reclaim irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex kthread_create_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &x->wait irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &pool->lock irq_context: 0 
(wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex __ip_vs_mutex.wait_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex.wait_lock irq_context: 0 &smc->clcsock_release_lock &p->pi_lock irq_context: 0 &smc->clcsock_release_lock &p->pi_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wq#4 irq_context: 0 rcu_read_lock &s->lock irq_context: 0 __ip_vs_mutex ipvs->est_mutex irq_context: 0 __ip_vs_mutex rcu_node_0 irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex fs_reclaim irq_context: 0 __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 __ip_vs_mutex pool_lock#2 irq_context: 0 (wq_completion)bond136 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 prog_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 prog_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 prog_idr_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 prog_idr_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 prog_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 (wq_completion)bond95#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock fs_reclaim irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock &____s->seqcount irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock stock_lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 irq_context: 0 
(wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond97#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rcu_state.expedited_wq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 
(wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
&c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex crngs.lock base_crng.lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &pcp->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) 
rcu_read_lock &im->lock rcu_read_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem quarantine_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond141 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 
0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &sem->waiters rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)bond143 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 irq_context: 0 (wq_completion)bond144 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 (wq_completion)bond102#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond146 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond40#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 (wq_completion)bond94#3 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex &mm->mmap_lock pool_lock#2 irq_context: 0 (wq_completion)bond147 irq_context: 0 (wq_completion)bond147 &rq->__lock irq_context: 0 (wq_completion)bond147 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#2 irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond42#2 irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&ifmgd->monitor_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&ifmgd->tdls_peer_del_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&ifmgd->tdls_peer_del_work)->work) &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifmgd->teardown_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifmgd->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx fs_reclaim &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &data->mutex irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->iflist_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->filter_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx rcu_read_lock &pool->lock/1 
&obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &lock->wait_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->iflist_mtx irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &data->mutex irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx nl_table_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx nl_table_wait.lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &list->lock#16 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx 
rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->filter_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx nl_table_wait.lock irq_context: 0 (wq_completion)phy11 (work_completion)(&ifmgd->monitor_work) irq_context: 0 (wq_completion)phy11 (work_completion)(&ifmgd->monitor_work) &wdev->mtx irq_context: 0 (wq_completion)bond149 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond43 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock 
irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock 
irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->conn_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond150 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond45 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx &c->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->scan_work)->work) &local->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)bond109#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond110#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond103#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond47#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)bond104#2 &rq->__lock irq_context: 0 (wq_completion)bond104#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &br->hash_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond153 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond153 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond148 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond155 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 
(wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 
(wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond114#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock &rq->__lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond156 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &ht->mutex quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock 
irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond115#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond158 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 irq_context: 0 (wq_completion)bond159 &rq->__lock irq_context: 0 (wq_completion)bond159 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond118#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond160 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)bond100#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond101#3 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 irq_context: 0 (wq_completion)bond162 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#3 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 
(wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#3 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &ei->socket.wq.wait irq_context: 0 (wq_completion)bond165 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond102#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock#3 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 lock#3 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 
(wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) quarantine_lock irq_context: 0 (wq_completion)bond167 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond104#3 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &ei->i_data_sem rcu_node_0 irq_context: 0 rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 (wq_completion)bond126#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 
0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 
(wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 irq_context: 0 (wq_completion)bond170 &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock 
irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond172 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond131#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 
(wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond168 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#3 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->arp_work)->work) &base->lock 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond175 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond136#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond178 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond140#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#3 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond144#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 &rq->__lock irq_context: 0 (wq_completion)bond147#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond116#3 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock pool_lock#2 irq_context: 0 (wq_completion)bond149#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond188 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) 
&app->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond190 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond194 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond154#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond186 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond115#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond153#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) stock_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond197 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond199 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)bond200 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond160#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 
(wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond184 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond184 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 &rq->__lock irq_context: 0 (wq_completion)bond183 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond201 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond73#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond73#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex noop_qdisc.q.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock 
&n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex batched_entropy_u8.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kfence_freelist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &meta->lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 
(wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond155#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond158#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pgd_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pcpu_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pcpu_lock stock_lock irq_context: 0 (wq_completion)bond163#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond203 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond161#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond149#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond183 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 
0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 
(wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&data->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 
(wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond204 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond206 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond126#3 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond127#3 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 &rq->__lock irq_context: 0 (wq_completion)bond186 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &xs->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond209 irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond209 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond129#3 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond210 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->alb_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond211 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 irq_context: 0 (wq_completion)bond77#2 &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 
k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#2 rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 irq_context: 0 (wq_completion)bond133#3 &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 
0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 
irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#2 irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 &rq->__lock irq_context: 0 (wq_completion)bond173#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond207 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock 
irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#3 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond175#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 irq_context: 0 (wq_completion)bond215 &rq->__lock irq_context: 0 (wq_completion)bond215 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond215 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond214 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond128#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond217 irq_context: 0 (wq_completion)bond217 &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock 
irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 &rq->__lock irq_context: 0 (wq_completion)bond175#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#2 irq_context: 0 (wq_completion)bond80#2 &rq->__lock irq_context: 0 (wq_completion)bond80#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond182#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond222 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond142#3 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond211 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond82#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond224 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 
(wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond181#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond225 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 batched_entropy_u8.lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 kfence_freelist_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond226 irq_context: 0 (wq_completion)bond226 &rq->__lock irq_context: 0 (wq_completion)bond226 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond172#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond223 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#2 irq_context: 0 (wq_completion)bond186#2 &rq->__lock irq_context: 0 (wq_completion)bond186#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 &rq->__lock irq_context: 0 (wq_completion)bond176#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond187#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 
0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 &rq->__lock irq_context: 0 (wq_completion)bond142#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond188#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond148#3 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 
(wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond85#3 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond149#3 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 &rq->__lock irq_context: 0 (wq_completion)bond216 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 irq_context: 0 (wq_completion)bond150#3 &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 &rq->__lock irq_context: 0 
(wq_completion)bond184#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#3 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 &rq->__lock irq_context: 0 (wq_completion)bond231 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#2 irq_context: 0 (wq_completion)bond191#2 &rq->__lock irq_context: 0 (wq_completion)bond191#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond210 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond210 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 irq_context: 0 (wq_completion)bond232 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond192#2 irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond152#3 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond193#2 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
&c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 &rq->__lock irq_context: 0 (wq_completion)bond211 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 &rq->__lock irq_context: 0 (wq_completion)bond222 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bond->arp_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 
(wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond235 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#4 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#2 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond195#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond228 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155#3 irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock pcpu_lock stock_lock irq_context: 0 
(wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 irq_context: 0 (wq_completion)bond196#2 &rq->__lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 &rq->__lock irq_context: 0 (wq_completion)bond129#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#4 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#3 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond238 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236 &rq->__lock irq_context: 0 (wq_completion)bond236 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 irq_context: 0 (wq_completion)bond91#4 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &meta->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond229 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock 
irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#4 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199#2 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mii_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)bond240 
irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond194#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#3 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241 irq_context: 0 
(wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond201#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond85#3 
(work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#4 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#4 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 &rq->__lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond161#3 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond151#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond202#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond207 &rq->__lock irq_context: 0 (wq_completion)bond94#4 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 &rq->__lock irq_context: 0 (wq_completion)bond221 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond218 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 &rq->__lock irq_context: 0 (wq_completion)bond192#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond163#3 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 &rq->__lock irq_context: 0 (wq_completion)bond75#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond244 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond204#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond194#2 &rq->__lock irq_context: 0 (wq_completion)bond194#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->arp_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#3 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock quarantine_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond246 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 &rq->__lock irq_context: 0 (wq_completion)bond149#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex 
&br->lock &____s->seqcount irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 &rq->__lock irq_context: 0 (wq_completion)bond91#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond248 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond203#2 
(work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond208#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 &rq->__lock irq_context: 0 (wq_completion)bond94#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 &rq->__lock irq_context: 0 uevent_sock_mutex fs_reclaim irq_context: 0 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#3 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &data->lock irq_context: 0 (wq_completion)bond249 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex &data->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#3 irq_context: 0 (wq_completion)bond169#3 &rq->__lock irq_context: 0 (wq_completion)bond169#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond250 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond209#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#3 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond251 irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle key#28 irq_context: 0 (wq_completion)bond211#2 irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#3 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 &rq->__lock irq_context: 0 (wq_completion)bond243 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond253 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#4 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond213#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond237 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#3 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond254 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond174#3 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) 
rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond214#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond170#3 &rq->__lock irq_context: 0 (wq_completion)bond170#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond256 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond256 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond254 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&rq->__lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#3 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 irq_context: 0 (wq_completion)bond257 &rq->__lock irq_context: 0 (wq_completion)bond257 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond257 &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond164#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217#2 irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 
0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 &rq->__lock irq_context: 0 (wq_completion)bond174#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218#2 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 &rq->__lock irq_context: 0 (wq_completion)bond258 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond253 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond219#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 &rq->__lock irq_context: 0 (wq_completion)bond246 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 &rq->__lock irq_context: 0 (wq_completion)bond137#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#3 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mii_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond260 irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221#2 irq_context: 0 (wq_completion)bond221#2 &rq->__lock irq_context: 0 (wq_completion)bond221#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181#3 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond261 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond212#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond222#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 irq_context: 0 (wq_completion)bond182#3 &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 
0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond233 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 
(wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262 irq_context: 0 (wq_completion)bond262 &rq->__lock irq_context: 0 (wq_completion)bond262 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond258 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223#2 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond223#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond183#3 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 &rq->__lock irq_context: 0 (wq_completion)bond178#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond263 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207#2 &rq->__lock irq_context: 0 (wq_completion)bond207#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#3 irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond184#3 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond239 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond258 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264 irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#4 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#4 &rq->__lock irq_context: 0 (wq_completion)bond95#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#3 irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#2 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 lock#3 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
&n->list_lock &c->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond266 irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227#2 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond261 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#3 &rq->__lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#3 irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond224#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem pgd_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem stock_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem key irq_context: 0 rtnl_mutex &root->kernfs_rwsem pcpu_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)bond228#2 
irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&n->list_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#3 irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond268 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &base->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond229#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#3 irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond269 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond201#2 &rq->__lock irq_context: 0 (wq_completion)bond201#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230#2 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond269 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond178#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#3 irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond190#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock 
irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219#2 &rq->__lock irq_context: 0 (wq_completion)bond219#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond270 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond222#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond222#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond260 &rq->__lock irq_context: 0 (wq_completion)bond260 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond267 &rq->__lock irq_context: 0 (wq_completion)bond267 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond191#3 irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond103#4 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 irq_context: 0 (wq_completion)bond271 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
&n->list_lock &c->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond232#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 &rq->__lock irq_context: 0 (wq_completion)bond145#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond272 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#2 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond270 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193#3 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond192#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond273 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond273 &rq->__lock irq_context: 0 (wq_completion)bond273 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#2 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 &rq->__lock irq_context: 0 (wq_completion)bond103#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#3 irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond274 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond235#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond234 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond231#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 &rq->__lock irq_context: 0 (wq_completion)bond102#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236#2 irq_context: 0 (wq_completion)bond236#2 &rq->__lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190#3 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196#3 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond276 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237#2 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#3 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond277 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238#2 irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 
(wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#3 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 
(work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond278 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239#2 irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) 
&p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond105#4 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond246 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199#3 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 irq_context: 0 (wq_completion)bond279 &rq->__lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond279 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond240#2 irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond200#3 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&n->list_lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 
(wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 &rq->__lock irq_context: 0 (wq_completion)bond212#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond280 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond241#2 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond201#3 irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond201#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond247 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock 
irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255 &rq->__lock irq_context: 0 (wq_completion)bond255 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond242#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond281 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond202#3 irq_context: 0 (wq_completion)bond202#3 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond243#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond229#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond255 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224 &rq->__lock irq_context: 0 (wq_completion)bond224 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond259 &rq->__lock irq_context: 0 (wq_completion)bond259 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond211#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#3 irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond244#2 irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond283 irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond204#3 irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond204#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond245 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 &rq->__lock irq_context: 0 (wq_completion)bond235#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#2 &rq->__lock irq_context: 0 (wq_completion)bond233#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284 irq_context: 0 
(wq_completion)bond284 &rq->__lock irq_context: 0 (wq_completion)bond284 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&c->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205#3 irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond205#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 &rq->__lock irq_context: 0 (wq_completion)bond281 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#4 irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond146#3 &rq->__lock irq_context: 0 (wq_completion)bond146#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond206#3 irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond232 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239#2 &rq->__lock irq_context: 0 (wq_completion)bond239#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond247#2 irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#4 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271 &rq->__lock irq_context: 0 (wq_completion)bond271 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond271 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond6 
&rq->__lock irq_context: 0 (wq_completion)bond6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207#3 irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#3 irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155#3 &rq->__lock irq_context: 0 (wq_completion)bond155#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#4 irq_context: 0 (wq_completion)bond110#4 &rq->__lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 
0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond287 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#3 &rq->__lock irq_context: 0 (wq_completion)bond191#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond109#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#3 irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond209#3 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 nf_sockopt_mutex rcu_node_0 irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond217#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#4 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond288 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond288 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond251#2 irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 
0 (wq_completion)bond251#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond289 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond207#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond207#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211#3 irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252#2 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#4 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond290 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond212#3 irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond112#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond196#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond253#2 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#4 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 
(wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond291 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#3 irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond213#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond254#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond214#3 irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond159#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond115#4 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255#2 irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond142#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->alb_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond293 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pgd_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pcpu_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pcpu_lock stock_lock irq_context: 0 
(wq_completion)bond215#3 irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond215#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256#2 irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond214#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216#3 irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond216#3 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257#2 irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond206#3 (work_completion)(&(&slave->notify_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond251#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond295 irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#3 irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond217#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock 
fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 nlk_cb_mutex-NETFILTER &____s->seqcount#2 irq_context: 0 nlk_cb_mutex-NETFILTER &____s->seqcount irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond177#2 &rq->__lock irq_context: 0 (wq_completion)bond177#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond284 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond296 irq_context: 0 (wq_completion)bond296 &rq->__lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond296 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#3 irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 &rq->__lock irq_context: 0 (wq_completion)bond180#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond259#2 irq_context: 0 (wq_completion)bond259#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 rcu_read_lock &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 rcu_read_lock &dev_addr_list_lock_key/1 rcu_read_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)bond219#3 irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond219#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock 
rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286 &rq->__lock irq_context: 0 (wq_completion)bond286 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex &rq->__lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond191#3 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond281 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_